/*
 * Copyright (c) 2013, 2015, Red Hat, Inc. and/or its affiliates.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.hpp"

#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/parallelCleaning.hpp"

#include "gc/shenandoah/brooksPointer.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc/shenandoah/shenandoahConcurrentThread.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahHumongous.hpp"
#include "gc/shenandoah/shenandoahMarkCompact.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahPartialGC.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.hpp"
#include "gc/shenandoah/vm_operations_shenandoah.hpp"

#include "runtime/vmThread.hpp"
#include "services/mallocTracker.hpp"
SCMUpdateRefsClosure::SCMUpdateRefsClosure() : _heap(ShenandoahHeap::heap()) {}

#ifdef ASSERT
template <class T>
void AssertToSpaceClosure::do_oop_nv(T* p) {
  T o = oopDesc::load_heap_oop(p);
  if (! oopDesc::is_null(o)) {
    oop obj = oopDesc::decode_heap_oop_not_null(o);
    assert(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)),
           "need to-space object here obj: " PTR_FORMAT ", rb(obj): " PTR_FORMAT ", p: " PTR_FORMAT,
           p2i(obj), p2i(ShenandoahBarrierSet::resolve_oop_static_not_null(obj)), p2i(p));
  }
}

void AssertToSpaceClosure::do_oop(narrowOop* p) { do_oop_nv(p); }
void AssertToSpaceClosure::do_oop(oop* p)       { do_oop_nv(p); }
#endif

const char* ShenandoahHeap::name() const {
  return "Shenandoah";
}

void ShenandoahHeap::print_heap_locations(HeapWord* start, HeapWord* end) {
  HeapWord* cur = NULL;
  for (cur = start; cur < end; cur++) {
    tty->print_cr(PTR_FORMAT " : " PTR_FORMAT, p2i(cur), p2i(*((HeapWord**) cur)));
  }
}

class ShenandoahPretouchTask : public AbstractGangTask {
private:
  ShenandoahHeapRegionSet* _regions;
  const size_t _bitmap_size;
  const size_t _page_size;
  char* _bitmap0_base;
  char* _bitmap1_base;
public:
  ShenandoahPretouchTask(ShenandoahHeapRegionSet* regions,
                         char* bitmap0_base, char* bitmap1_base, size_t bitmap_size,
                         size_t page_size) :
    AbstractGangTask("Shenandoah PreTouch",
                     Universe::is_fully_initialized() ? GCId::current_raw() :
                                                        // During VM initialization there is
                                                        // no GC cycle that this task can be
                                                        // associated with.
                                                        GCId::undefined()),
    _regions(regions),
    _bitmap_size(bitmap_size),
    _page_size(page_size),
    _bitmap0_base(bitmap0_base),
    _bitmap1_base(bitmap1_base) {
    _regions->clear_current_index();
  }

  virtual void work(uint worker_id) {
    ShenandoahHeapRegion* r = _regions->claim_next();
    while (r != NULL) {
      log_trace(gc, heap)("Pretouch region " SIZE_FORMAT ": " PTR_FORMAT " -> " PTR_FORMAT,
                          r->region_number(), p2i(r->bottom()), p2i(r->end()));
      os::pretouch_memory(r->bottom(), r->end(), _page_size);

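      // One bitmap byte covers a fixed amount of heap (CMBitMap::heap_map_factor()
      // heap bytes per bitmap byte), so the bitmap slice for this region is its
      // byte offset in the heap scaled down by that factor.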
      size_t start = r->region_number()       * ShenandoahHeapRegion::region_size_bytes() / CMBitMap::heap_map_factor();
      size_t end   = (r->region_number() + 1) * ShenandoahHeapRegion::region_size_bytes() / CMBitMap::heap_map_factor();
      assert (end <= _bitmap_size, "end is sane: " SIZE_FORMAT " <= " SIZE_FORMAT, end, _bitmap_size);

      log_trace(gc, heap)("Pretouch bitmap under region " SIZE_FORMAT ": " PTR_FORMAT " -> " PTR_FORMAT,
                          r->region_number(), p2i(_bitmap0_base + start), p2i(_bitmap0_base + end));
      os::pretouch_memory(_bitmap0_base + start, _bitmap0_base + end, _page_size);

      log_trace(gc, heap)("Pretouch bitmap under region " SIZE_FORMAT ": " PTR_FORMAT " -> " PTR_FORMAT,
                          r->region_number(), p2i(_bitmap1_base + start), p2i(_bitmap1_base + end));
      os::pretouch_memory(_bitmap1_base + start, _bitmap1_base + end, _page_size);

      r = _regions->claim_next();
    }
  }
};

jint ShenandoahHeap::initialize() {
  CollectedHeap::pre_initialize();

  BrooksPointer::initial_checks();

  size_t init_byte_size = collector_policy()->initial_heap_byte_size();
  size_t max_byte_size = collector_policy()->max_heap_byte_size();
  size_t heap_alignment = collector_policy()->heap_alignment();

  Universe::check_alignment(max_byte_size,
                            ShenandoahHeapRegion::region_size_bytes(),
                            "shenandoah heap");
  Universe::check_alignment(init_byte_size,
                            ShenandoahHeapRegion::region_size_bytes(),
                            "shenandoah heap");

  ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
                                                 heap_alignment);
  initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*) (heap_rs.base() + heap_rs.size()));

  set_barrier_set(new ShenandoahBarrierSet(this));
  ReservedSpace pgc_rs = heap_rs.first_part(max_byte_size);
  _storage.initialize(pgc_rs, init_byte_size);

  _num_regions = init_byte_size / ShenandoahHeapRegion::region_size_bytes();
  _max_regions = max_byte_size / ShenandoahHeapRegion::region_size_bytes();
  _initialSize = _num_regions * ShenandoahHeapRegion::region_size_bytes();
  size_t regionSizeWords = ShenandoahHeapRegion::region_size_bytes() / HeapWordSize;
  assert(init_byte_size == _initialSize, "tautology");
  _ordered_regions = new ShenandoahHeapRegionSet(_max_regions);
  _collection_set = new ShenandoahCollectionSet(_max_regions);
  _free_regions = new ShenandoahFreeSet(_max_regions);

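  // The three arrays below use "biased bases": subtracting the region index of
  // the heap base from each array base lets them be indexed directly with
  // (address >> region_size_shift), with no per-access offset adjustment.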
  // Initialize fast collection set test structure.
  _in_cset_fast_test_length = _max_regions;
  _in_cset_fast_test_base =
                   NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length, mtGC);
  _in_cset_fast_test = _in_cset_fast_test_base -
               ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::region_size_shift());

  _next_top_at_mark_starts_base =
                   NEW_C_HEAP_ARRAY(HeapWord*, _max_regions, mtGC);
  _next_top_at_mark_starts = _next_top_at_mark_starts_base -
               ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::region_size_shift());

  _complete_top_at_mark_starts_base =
                   NEW_C_HEAP_ARRAY(HeapWord*, _max_regions, mtGC);
  _complete_top_at_mark_starts = _complete_top_at_mark_starts_base -
               ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::region_size_shift());

  size_t i = 0;
  for (i = 0; i < _num_regions; i++) {
    _in_cset_fast_test_base[i] = false; // Not in cset
    HeapWord* bottom = (HeapWord*) pgc_rs.base() + regionSizeWords * i;
    _complete_top_at_mark_starts_base[i] = bottom;
    _next_top_at_mark_starts_base[i] = bottom;
  }

  {
    ShenandoahHeapLock lock(this);
    for (i = 0; i < _num_regions; i++) {
      ShenandoahHeapRegion* current = new ShenandoahHeapRegion(this, (HeapWord*) pgc_rs.base() +
                                                               regionSizeWords * i, regionSizeWords, i);
      _free_regions->add_region(current);
      _ordered_regions->add_region(current);
    }
  }
  assert(((size_t) _ordered_regions->active_regions()) == _num_regions, "");
  assert((((size_t) base()) &
          (ShenandoahHeapRegion::region_size_bytes() - 1)) == 0,
         "misaligned heap: " PTR_FORMAT, p2i(base()));

  if (log_is_enabled(Trace, gc, region)) {
    ResourceMark rm;
    outputStream* out = Log(gc, region)::trace_stream();
    log_trace(gc, region)("All Regions");
    _ordered_regions->print(out);
    log_trace(gc, region)("Free Regions");
    _free_regions->print(out);
  }

  _recycled_regions = NEW_C_HEAP_ARRAY(size_t, _max_regions, mtGC);
  _recycled_region_count = 0;

  // The call below uses SATB machinery that currently lives in G1 code, but
  // probably belongs in a shared location.
  JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
                                               SATB_Q_FL_lock,
                                               20 /* G1SATBProcessCompletedThreshold */,
                                               Shared_SATB_Q_lock);

  // Reserve space for the complete and next mark bitmaps.
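  // Note: the two bitmaps trade roles across cycles; after marking completes,
  // the freshly populated "next" bitmap is expected to serve as the "complete"
  // bitmap for the following cycle.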
  _bitmap_size = CMBitMap::compute_size(heap_rs.size());
  _heap_region = MemRegion((HeapWord*) heap_rs.base(), heap_rs.size() / HeapWordSize);

  size_t page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();

  ReservedSpace bitmap0(_bitmap_size, page_size);
  os::commit_memory_or_exit(bitmap0.base(), bitmap0.size(), false, "couldn't allocate mark bitmap");
  MemTracker::record_virtual_memory_type(bitmap0.base(), mtGC);
  MemRegion bitmap_region0 = MemRegion((HeapWord*) bitmap0.base(), bitmap0.size() / HeapWordSize);

  ReservedSpace bitmap1(_bitmap_size, page_size);
  os::commit_memory_or_exit(bitmap1.base(), bitmap1.size(), false, "couldn't allocate mark bitmap");
  MemTracker::record_virtual_memory_type(bitmap1.base(), mtGC);
  MemRegion bitmap_region1 = MemRegion((HeapWord*) bitmap1.base(), bitmap1.size() / HeapWordSize);

  if (ShenandoahVerify || (UseShenandoahMatrix && VerifyShenandoahMatrix)) {
    ReservedSpace verify_bitmap(_bitmap_size, page_size);
    os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), false,
                              "couldn't allocate verification bitmap");
    MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
    MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
    _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
  }

  if (ShenandoahAlwaysPreTouch) {
    assert (!AlwaysPreTouch, "Should have been overridden");

    // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
    // before the initialize() calls below zero it with the initializing thread. For any given
    // region, we touch the region and the corresponding bitmaps from the same thread.

    log_info(gc, heap)("Parallel pretouch " SIZE_FORMAT " regions with " SIZE_FORMAT " byte pages",
                       _ordered_regions->count(), page_size);
    ShenandoahPretouchTask cl(_ordered_regions, bitmap0.base(), bitmap1.base(), _bitmap_size, page_size);
    _workers->run_task(&cl);
  }

  _mark_bit_map0.initialize(_heap_region, bitmap_region0);
  _complete_mark_bit_map = &_mark_bit_map0;

  _mark_bit_map1.initialize(_heap_region, bitmap_region1);
  _next_mark_bit_map = &_mark_bit_map1;

  _connection_matrix = UseShenandoahMatrix ?
                       new ShenandoahConnectionMatrix(_max_regions) :
                       NULL;

  _partial_gc = _shenandoah_policy->can_do_partial_gc() ?
                new ShenandoahPartialGC(this, _max_regions) :
                NULL;

  _monitoring_support = new ShenandoahMonitoringSupport(this);

  _concurrent_gc_thread = new ShenandoahConcurrentThread();

  ShenandoahMarkCompact::initialize();

  return JNI_OK;
}

ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
  CollectedHeap(),
  _shenandoah_policy(policy),
  _concurrent_mark_in_progress(0),
  _evacuation_in_progress(0),
  _full_gc_in_progress(false),
  _update_refs_in_progress(false),
  _free_regions(NULL),
  _collection_set(NULL),
  _bytes_allocated_since_cm(0),
  _bytes_allocated_during_cm(0),
  _allocated_last_gc(0),
  _used_start_gc(0),
  _max_workers(MAX2(ConcGCThreads, ParallelGCThreads)),
  _ref_processor(NULL),
  _in_cset_fast_test(NULL),
  _in_cset_fast_test_base(NULL),
  _next_top_at_mark_starts(NULL),
  _next_top_at_mark_starts_base(NULL),
  _complete_top_at_mark_starts(NULL),
  _complete_top_at_mark_starts_base(NULL),
  _mark_bit_map0(),
  _mark_bit_map1(),
  _connection_matrix(NULL),
  _cancelled_concgc(false),
  _need_update_refs(false),
  _need_reset_bitmaps(false),
  _heap_lock(0),
#ifdef ASSERT
  _heap_lock_owner(NULL),
#endif
  _gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer())
{
  log_info(gc, init)("Parallel GC threads: " UINT32_FORMAT, ParallelGCThreads);
  log_info(gc, init)("Concurrent GC threads: " UINT32_FORMAT, ConcGCThreads);
  log_info(gc, init)("Parallel reference processing enabled: %s", BOOL_TO_STR(ParallelRefProcEnabled));

  _scm = new ShenandoahConcurrentMark();
  _used = 0;

  _max_workers = MAX2(_max_workers, 1U);
  _workers = new ShenandoahWorkGang("Shenandoah GC Threads", _max_workers,
                            /* are_GC_task_threads */ true,
                            /* are_ConcurrentGC_threads */ false);
  if (_workers == NULL) {
    vm_exit_during_initialization("Failed necessary allocation.");
  } else {
    _workers->initialize_workers();
  }
}

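// The two tasks below clear the mark bitmaps region-by-region in parallel. Only
// the range below a region's top-at-mark-start (TAMS) can hold stale marks;
// above TAMS the bitmap was never set, so that part needs no clearing.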
class ResetNextBitmapTask : public AbstractGangTask {
private:
  ShenandoahHeapRegionSet* _regions;

public:
  ResetNextBitmapTask(ShenandoahHeapRegionSet* regions) :
    AbstractGangTask("Parallel Reset Bitmap Task"),
    _regions(regions) {
    _regions->clear_current_index();
  }

  void work(uint worker_id) {
    ShenandoahHeapRegion* region = _regions->claim_next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    while (region != NULL) {
      HeapWord* bottom = region->bottom();
      HeapWord* top = heap->next_top_at_mark_start(region->bottom());
      if (top > bottom) {
        heap->next_mark_bit_map()->clear_range_large(MemRegion(bottom, top));
      }
      region = _regions->claim_next();
    }
  }
};

void ShenandoahHeap::reset_next_mark_bitmap(WorkGang* workers) {
  ResetNextBitmapTask task = ResetNextBitmapTask(_ordered_regions);
  workers->run_task(&task);
}

class ResetCompleteBitmapTask : public AbstractGangTask {
private:
  ShenandoahHeapRegionSet* _regions;

public:
  ResetCompleteBitmapTask(ShenandoahHeapRegionSet* regions) :
    AbstractGangTask("Parallel Reset Bitmap Task"),
    _regions(regions) {
    _regions->clear_current_index();
  }

  void work(uint worker_id) {
    ShenandoahHeapRegion* region = _regions->claim_next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    while (region != NULL) {
      HeapWord* bottom = region->bottom();
      HeapWord* top = heap->complete_top_at_mark_start(region->bottom());
      if (top > bottom) {
        heap->complete_mark_bit_map()->clear_range_large(MemRegion(bottom, top));
      }
      region = _regions->claim_next();
    }
  }
};

void ShenandoahHeap::reset_complete_mark_bitmap(WorkGang* workers) {
  ResetCompleteBitmapTask task = ResetCompleteBitmapTask(_ordered_regions);
  workers->run_task(&task);
}

bool ShenandoahHeap::is_next_bitmap_clear() {
  HeapWord* start = _ordered_regions->bottom();
  HeapWord* end = _ordered_regions->end();
  return _next_mark_bit_map->getNextMarkedWordAddress(start, end) == end;
}

bool ShenandoahHeap::is_complete_bitmap_clear_range(HeapWord* start, HeapWord* end) {
  return _complete_mark_bit_map->getNextMarkedWordAddress(start, end) == end;
}

void ShenandoahHeap::print_on(outputStream* st) const {
  st->print("Shenandoah Heap");
  st->print(" total = " SIZE_FORMAT " K, used " SIZE_FORMAT " K ", capacity() / K, used() / K);
  st->print(" [" PTR_FORMAT ", " PTR_FORMAT ") ",
            p2i(reserved_region().start()),
            p2i(reserved_region().end()));
  st->print("Region size = " SIZE_FORMAT "K ", ShenandoahHeapRegion::region_size_bytes() / K);
  if (_concurrent_mark_in_progress) {
    st->print("marking ");
  }
  if (_evacuation_in_progress) {
    st->print("evacuating ");
  }
  if (cancelled_concgc()) {
    st->print("cancelled ");
  }
  st->print("\n");

  // Adapted from VirtualSpace::print_on(), which is non-PRODUCT only.
  st->print("Virtual space:");
  if (_storage.special()) st->print(" (pinned in memory)");
  st->cr();
  st->print_cr(" - committed: " SIZE_FORMAT, _storage.committed_size());
  st->print_cr(" - reserved:  " SIZE_FORMAT, _storage.reserved_size());
  st->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  p2i(_storage.low()), p2i(_storage.high()));
  st->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  p2i(_storage.low_boundary()), p2i(_storage.high_boundary()));

  if (Verbose) {
    print_heap_regions(st);
  }
}

class InitGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    thread->gclab().initialize(true);
  }
};

void ShenandoahHeap::post_initialize() {
  if (UseTLAB) {
    // This is a very tricky point in VM lifetime. We cannot easily call Threads::threads_do
    // here, because some system threads (VMThread, WatcherThread, etc) are not yet available.
    // Their initialization should be handled separately. If we miss some threads here,
    // then any other TLAB-related activity would fail with asserts.

    InitGCLABClosure init_gclabs;
    {
      MutexLocker ml(Threads_lock);
      for (JavaThread *thread = Threads::first(); thread != NULL; thread = thread->next()) {
        init_gclabs.do_thread(thread);
      }
    }
    gc_threads_do(&init_gclabs);

    // GCLABs cannot be initialized this early during VM startup, because their max_size
    // cannot be determined yet. Instead, let the WorkGang initialize the GCLAB whenever
    // a new worker is created.
    _workers->set_initialize_gclab();
  }

  _scm->initialize(_max_workers);

  ref_processing_init();
}

class CalculateUsedRegionClosure : public ShenandoahHeapRegionClosure {
  size_t sum;
public:
  CalculateUsedRegionClosure() : sum(0) {}

  bool doHeapRegion(ShenandoahHeapRegion* r) {
    sum = sum + r->used();
    return false;
  }

  size_t getResult() { return sum; }
};

size_t ShenandoahHeap::calculateUsed() {
  CalculateUsedRegionClosure cl;
  heap_region_iterate(&cl);
  return cl.getResult();
}

void ShenandoahHeap::verify_heap_size_consistency() {
  assert(calculateUsed() == used(),
         "heap used size must be consistent heap-used: " SIZE_FORMAT " regions-used: " SIZE_FORMAT, used(), calculateUsed());
}

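// _used is read lock-free; all updates happen under the heap lock or at a
// safepoint (see increase_used/set_used/decrease_used below).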
size_t ShenandoahHeap::used() const {
  OrderAccess::acquire();
  return _used;
}

void ShenandoahHeap::increase_used(size_t bytes) {
  assert_heaplock_or_safepoint();
  _used += bytes;
}

void ShenandoahHeap::set_used(size_t bytes) {
  assert_heaplock_or_safepoint();
  _used = bytes;
}

void ShenandoahHeap::decrease_used(size_t bytes) {
  assert_heaplock_or_safepoint();
  assert(_used >= bytes, "never decrease used by more than what is currently used");
  _used -= bytes;
}

size_t ShenandoahHeap::capacity() const {
  return _num_regions * ShenandoahHeapRegion::region_size_bytes();
}

bool ShenandoahHeap::is_maximal_no_gc() const {
  Unimplemented();
  return true;
}

size_t ShenandoahHeap::max_capacity() const {
  return _max_regions * ShenandoahHeapRegion::region_size_bytes();
}

size_t ShenandoahHeap::min_capacity() const {
  return _initialSize;
}

VirtualSpace* ShenandoahHeap::storage() const {
  return (VirtualSpace*) &_storage;
}

bool ShenandoahHeap::is_in(const void* p) const {
  HeapWord* heap_base = (HeapWord*) base();
  HeapWord* last_region_end = heap_base + (ShenandoahHeapRegion::region_size_bytes() / HeapWordSize) * _num_regions;
  return p >= heap_base && p < last_region_end;
}

bool ShenandoahHeap::is_scavengable(const void* p) {
  return true;
}

HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
  // Retain the GCLAB and allocate the object in shared space if
  // the amount free in the GCLAB is too large to discard.
  if (thread->gclab().free() > thread->gclab().refill_waste_limit()) {
    thread->gclab().record_slow_allocation(size);
    return NULL;
  }

  // Discard the GCLAB and allocate a new one.
  // To minimize fragmentation, the last GCLAB may be smaller than the rest.
  size_t new_gclab_size = thread->gclab().compute_size(size);

  thread->gclab().clear_before_allocation();

  if (new_gclab_size == 0) {
    return NULL;
  }

  // Allocate a new GCLAB...
  HeapWord* obj = allocate_new_gclab(new_gclab_size);
  if (obj == NULL) {
    return NULL;
  }

  if (ZeroTLAB) {
    // ...and clear it.
    Copy::zero_to_words(obj, new_gclab_size);
  } else {
    // ...and zap just allocated object.
#ifdef ASSERT
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(obj + hdr_size, new_gclab_size - hdr_size, badHeapWordVal);
#endif // ASSERT
  }
  thread->gclab().fill(obj, obj + size, new_gclab_size);
  return obj;
}

HeapWord* ShenandoahHeap::allocate_new_tlab(size_t word_size) {
#ifdef ASSERT
  log_debug(gc, alloc)("Allocate new tlab, requested size = " SIZE_FORMAT " bytes", word_size * HeapWordSize);
#endif
  return allocate_new_lab(word_size, _lab_thread);
}

HeapWord* ShenandoahHeap::allocate_new_gclab(size_t word_size) {
#ifdef ASSERT
  log_debug(gc, alloc)("Allocate new gclab, requested size = " SIZE_FORMAT " bytes", word_size * HeapWordSize);
#endif
  return allocate_new_lab(word_size, _lab_gc);
}

HeapWord* ShenandoahHeap::allocate_new_lab(size_t word_size, LabType type) {
  HeapWord* result = allocate_memory(word_size, type);

  if (result != NULL) {
    assert(! in_collection_set(result), "Never allocate in dirty region");
    _bytes_allocated_since_cm += word_size * HeapWordSize;

    log_develop_trace(gc, tlab)("allocating new tlab of size " SIZE_FORMAT " at addr " PTR_FORMAT, word_size, p2i(result));
  }
  return result;
}

ShenandoahHeap* ShenandoahHeap::heap() {
  CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to ShenandoahHeap::heap()");
  assert(heap->kind() == CollectedHeap::ShenandoahHeap, "not a shenandoah heap");
  return (ShenandoahHeap*) heap;
}

ShenandoahHeap* ShenandoahHeap::heap_no_check() {
  CollectedHeap* heap = Universe::heap();
  return (ShenandoahHeap*) heap;
}

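// Attempt the allocation under the heap lock; while it fails and the heap can
// still grow, grow by just enough regions to fit the request and retry.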
HeapWord* ShenandoahHeap::allocate_memory_work(size_t word_size, LabType type) {
  ShenandoahHeapLock heap_lock(this);

  HeapWord* result = allocate_memory_under_lock(word_size, type);
  size_t grow_by = (word_size * HeapWordSize + ShenandoahHeapRegion::region_size_bytes() - 1) / ShenandoahHeapRegion::region_size_bytes();

  while (result == NULL && _num_regions + grow_by <= _max_regions) {
    grow_heap_by(grow_by);
    result = allocate_memory_under_lock(word_size, type);
  }

  return result;
}

HeapWord* ShenandoahHeap::allocate_memory(size_t word_size, LabType type) {
  HeapWord* result = allocate_memory_work(word_size, type);

  if (type == _lab_thread) {
    // Allocation failed, try full GC, then retry allocation.
    //
    // It might happen that one of the threads requesting allocation would unblock
    // way later after full GC happened, only to fail the second allocation, because
    // other threads have already depleted the free storage. In this case, a better
    // strategy would be to try full GC again.
    //
    // Lacking a way to detect progress from the "collect" call, we are left with blindly
    // retrying for some bounded number of times.
    // TODO: Poll if Full GC made enough progress to warrant retry.
    int tries = 0;
    while ((result == NULL) && (tries++ < ShenandoahFullGCTries)) {
      log_debug(gc)("[" PTR_FORMAT "] Failed to allocate " SIZE_FORMAT " bytes, doing full GC, try %d",
                    p2i(Thread::current()), word_size * HeapWordSize, tries);
      collect(GCCause::_allocation_failure);
      result = allocate_memory_work(word_size, type);
    }
  }

  // Only update monitoring counters when not calling from a write-barrier.
  // Otherwise we might attempt to grab the Service_lock, which we must
  // not do when coming from a write-barrier (because the thread might
  // already hold the Compile_lock).
  if (type == _lab_thread) {
    monitoring_support()->update_counters();
  }

  log_develop_trace(gc, alloc)("allocate memory chunk of size " SIZE_FORMAT " at addr " PTR_FORMAT " by thread %d ",
                               word_size, p2i(result), Thread::current()->osthread()->thread_id());

  return result;
}

HeapWord* ShenandoahHeap::allocate_memory_under_lock(size_t word_size, LabType type) {
  assert_heaplock_owned_by_current_thread();

  if (word_size * HeapWordSize > ShenandoahHeapRegion::region_size_bytes()) {
    return allocate_large_memory(word_size);
  }

  // Not enough memory in the free region set.
  // Coming out of full GC, it is possible that there is no
  // free region available, so current_index may not be valid.
  if (word_size * HeapWordSize > _free_regions->capacity()) return NULL;

  ShenandoahHeapRegion* my_current_region = _free_regions->current_no_humongous();

  if (my_current_region == NULL) {
    return NULL; // No more room to make a new region. OOM.
  }
  assert(! in_collection_set(my_current_region), "never get targeted regions in free-lists");
  assert(! my_current_region->is_humongous(), "never attempt to allocate from humongous object regions");

  HeapWord* result = my_current_region->allocate_lab(word_size, type);

  while (result == NULL) {
    // 2nd attempt. Try next region.
#ifdef ASSERT
    if (my_current_region->free() > 0) {
      log_debug(gc, alloc)("Retire region with " SIZE_FORMAT " bytes free", my_current_region->free());
    }
#endif
    _free_regions->increase_used(my_current_region->free());
    ShenandoahHeapRegion* next_region = _free_regions->next_no_humongous();
    assert(next_region != my_current_region, "must not get current again");
    my_current_region = next_region;

    if (my_current_region == NULL) {
      return NULL; // No more room to make a new region. OOM.
    }
    assert(! in_collection_set(my_current_region), "never get targeted regions in free-lists");
    assert(! my_current_region->is_humongous(), "never attempt to allocate from humongous object regions");
    result = my_current_region->allocate_lab(word_size, type);
  }

  my_current_region->increase_live_data_words(word_size);
  increase_used(word_size * HeapWordSize);
  _free_regions->increase_used(word_size * HeapWordSize);
  return result;
}

HeapWord* ShenandoahHeap::allocate_large_memory(size_t words) {
  assert_heaplock_owned_by_current_thread();

  size_t required_regions = ShenandoahHumongous::required_regions(words * HeapWordSize);
  if (required_regions > _max_regions) return NULL;

  ShenandoahHeapRegion* r = _free_regions->allocate_contiguous(required_regions);

  HeapWord* result = NULL;

  if (r != NULL) {
    result = r->bottom();

    log_debug(gc, humongous)("allocating humongous object of size: " SIZE_FORMAT " KB at location " PTR_FORMAT " in start region " SIZE_FORMAT,
                             (words * HeapWordSize) / K, p2i(result), r->region_number());
  } else {
    log_debug(gc, humongous)("allocating humongous object of size: " SIZE_FORMAT " KB at location " PTR_FORMAT " failed",
                             (words * HeapWordSize) / K, p2i(result));
  }

  return result;
}

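// Every Shenandoah object is preceded by a Brooks forwarding pointer word, so a
// regular heap allocation reserves one extra word in front of the object and
// initializes it to point back at the object itself.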
HeapWord* ShenandoahHeap::mem_allocate(size_t size,
                                       bool*  gc_overhead_limit_was_exceeded) {
  HeapWord* filler = allocate_memory(size + BrooksPointer::word_size(), _lab_thread);
  if (filler != NULL) {
    HeapWord* result = filler + BrooksPointer::word_size();
    BrooksPointer::initialize(oop(result));
    _bytes_allocated_since_cm += size * HeapWordSize;

    assert(! in_collection_set(result), "never allocate in targeted region");
    return result;
  } else {
    /*
    tty->print_cr("Out of memory. Requested number of words: " SIZE_FORMAT " used heap: " INT64_FORMAT ", bytes allocated since last CM: " INT64_FORMAT,
                  size, used(), _bytes_allocated_since_cm);
    {
      print_heap_regions();
      tty->print("Printing " SIZE_FORMAT " free regions:\n", _free_regions->count());
      _free_regions->print();
    }
    */
    return NULL;
  }
}

class ParallelEvacuateRegionObjectClosure : public ObjectClosure {
private:
  ShenandoahHeap* _heap;
  Thread* _thread;
public:
  ParallelEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
    _heap(heap), _thread(Thread::current()) {
  }

  void do_object(oop p) {
    log_develop_trace(gc, compaction)("Calling ParallelEvacuateRegionObjectClosure on " PTR_FORMAT " of size %d",
                                      p2i((HeapWord*) p), p->size());

    assert(_heap->is_marked_complete(p), "expect only marked objects");
    if (oopDesc::unsafe_equals(p, ShenandoahBarrierSet::resolve_oop_static_not_null(p))) {
      // Not forwarded yet: race with other GC threads to install a to-space copy.
      bool evac;
      _heap->evacuate_object(p, _thread, evac);
    }
  }
};

#ifdef ASSERT
class VerifyEvacuatedObjectClosure : public ObjectClosure {
public:
  void do_object(oop p) {
    if (ShenandoahHeap::heap()->is_marked_complete(p)) {
      oop p_prime = oopDesc::bs()->read_barrier(p);
      assert(! oopDesc::unsafe_equals(p, p_prime), "Should point to evacuated copy");
      if (p->klass() != p_prime->klass()) {
        tty->print_cr("copy has different class than original:");
        p->klass()->print_on(tty);
        p_prime->klass()->print_on(tty);
      }
      assert(p->klass() == p_prime->klass(), "Should have the same class p: " PTR_FORMAT ", p_prime: " PTR_FORMAT, p2i(p), p2i(p_prime));
      //      assert(p->mark() == p_prime->mark(), "Should have the same mark");
      assert(p->size() == p_prime->size(), "Should be the same size");
      assert(oopDesc::unsafe_equals(p_prime, oopDesc::bs()->read_barrier(p_prime)), "One forward once");
    }
  }
};

void ShenandoahHeap::verify_evacuated_region(ShenandoahHeapRegion* from_region) {
  VerifyEvacuatedObjectClosure verify_evacuation;
  marked_object_iterate(from_region, &verify_evacuation);
}
#endif

void ShenandoahHeap::parallel_evacuate_region(ShenandoahHeapRegion* from_region) {
  assert(from_region->has_live(), "all-garbage regions are reclaimed earlier");

  ParallelEvacuateRegionObjectClosure evacuate_region(this);

  marked_object_iterate(from_region, &evacuate_region);

#ifdef ASSERT
  if (ShenandoahVerify && ! cancelled_concgc()) {
    verify_evacuated_region(from_region);
  }
#endif
}

class ParallelEvacuationTask : public AbstractGangTask {
private:
  ShenandoahHeap* _sh;
  ShenandoahCollectionSet* _cs;

public:
  ParallelEvacuationTask(ShenandoahHeap* sh,
                         ShenandoahCollectionSet* cs) :
    AbstractGangTask("Parallel Evacuation Task"),
    _sh(sh),
    _cs(cs) {}

  void work(uint worker_id) {
    ShenandoahHeapRegion* from_hr = _cs->claim_next();

    while (from_hr != NULL) {
      log_develop_trace(gc, region)("Thread " UINT32_FORMAT " claimed Heap Region " SIZE_FORMAT,
                                    worker_id,
                                    from_hr->region_number());

      assert(from_hr->has_live(), "all-garbage regions are reclaimed early");
      _sh->parallel_evacuate_region(from_hr);

      if (_sh->cancelled_concgc()) {
        log_develop_trace(gc, region)("Cancelled concgc while evacuating region " SIZE_FORMAT, from_hr->region_number());
        break;
      }
      from_hr = _cs->claim_next();
    }
  }
};
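// Walk all regions under the heap lock; regions in the collection set have been
// fully evacuated by this point, so their storage can be reclaimed. Recycling is
// batched through the deferred-recycling protocol (start/defer/finish below).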
void ShenandoahHeap::recycle_dirty_regions() {
  ShenandoahHeapLock lock(this);

  size_t bytes_reclaimed = 0;

  ShenandoahHeapRegionSet* set = regions();
  set->clear_current_index();

  start_deferred_recycling();

  ShenandoahHeapRegion* r = set->claim_next();
  while (r != NULL) {
    if (in_collection_set(r)) {
      decrease_used(r->used());
      bytes_reclaimed += r->used();
      defer_recycle(r);
    }
    r = set->claim_next();
  }

  finish_deferred_recycle();

  _shenandoah_policy->record_bytes_reclaimed(bytes_reclaimed);
  if (! cancelled_concgc()) {
    clear_cset_fast_test();
  }
}

ShenandoahFreeSet* ShenandoahHeap::free_regions() {
  return _free_regions;
}

void ShenandoahHeap::print_heap_regions(outputStream* st) const {
  _ordered_regions->print(st);
}

class PrintAllRefsOopClosure: public ExtendedOopClosure {
private:
  int _index;
  const char* _prefix;

public:
  PrintAllRefsOopClosure(const char* prefix) : _index(0), _prefix(prefix) {}

private:
  template <class T>
  inline void do_oop_work(T* p) {
    oop o = oopDesc::load_decode_heap_oop(p);
    if (o != NULL) {
      if (ShenandoahHeap::heap()->is_in(o) && o->is_oop()) {
        tty->print_cr("%s " INT32_FORMAT " (" PTR_FORMAT ") -> " PTR_FORMAT " (marked: %s) (%s " PTR_FORMAT ")",
                      _prefix, _index,
                      p2i(p), p2i(o),
                      BOOL_TO_STR(ShenandoahHeap::heap()->is_marked_complete(o)),
                      o->klass()->internal_name(), p2i(o->klass()));
      } else {
        tty->print_cr("%s " INT32_FORMAT " (" PTR_FORMAT ") dirty -> " PTR_FORMAT " (not in heap, possibly corrupted or dirty)",
                      _prefix, _index,
                      p2i(p), p2i(o));
      }
    } else {
      tty->print_cr("%s " INT32_FORMAT " (" PTR_FORMAT ") -> " PTR_FORMAT, _prefix, _index, p2i(p), p2i((HeapWord*) o));
    }
    _index++;
  }

public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }

  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

class PrintAllRefsObjectClosure : public ObjectClosure {
  const char* _prefix;

public:
  PrintAllRefsObjectClosure(const char* prefix) : _prefix(prefix) {}

  void do_object(oop p) {
    if (ShenandoahHeap::heap()->is_in(p)) {
      tty->print_cr("%s object " PTR_FORMAT " (marked: %s) (%s " PTR_FORMAT ") refers to:",
                    _prefix, p2i(p),
                    BOOL_TO_STR(ShenandoahHeap::heap()->is_marked_complete(p)),
                    p->klass()->internal_name(), p2i(p->klass()));
      PrintAllRefsOopClosure cl(_prefix);
      p->oop_iterate(&cl);
    }
  }
};

void ShenandoahHeap::print_all_refs(const char* prefix) {
  tty->print_cr("printing all references in the heap");
  tty->print_cr("root references:");

  ensure_parsability(false);

  PrintAllRefsOopClosure cl(prefix);
  roots_iterate(&cl);

  tty->print_cr("heap references:");
  PrintAllRefsObjectClosure cl2(prefix);
  object_iterate(&cl2);
}

class VerifyAfterMarkingOopClosure: public ExtendedOopClosure {
private:
  ShenandoahHeap*  _heap;

public:
  VerifyAfterMarkingOopClosure() :
    _heap(ShenandoahHeap::heap()) { }

private:
  template <class T>
  inline void do_oop_work(T* p) {
    oop o = oopDesc::load_decode_heap_oop(p);
    if (o != NULL) {
      if (! _heap->is_marked_complete(o)) {
        _heap->print_heap_regions();
        _heap->print_all_refs("post-mark");
        tty->print_cr("oop not marked, although referrer is marked: " PTR_FORMAT ": in_heap: %s, is_marked: %s",
                      p2i((HeapWord*) o), BOOL_TO_STR(_heap->is_in(o)), BOOL_TO_STR(_heap->is_marked_complete(o)));
        _heap->print_heap_locations((HeapWord*) o, (HeapWord*) o + o->size());

        tty->print_cr("oop class: %s", o->klass()->internal_name());
        if (_heap->is_in(p)) {
          oop referrer = oop(_heap->heap_region_containing(p)->block_start_const(p));
          tty->print_cr("Referrer starts at addr " PTR_FORMAT, p2i((HeapWord*) referrer));
          referrer->print();
          _heap->print_heap_locations((HeapWord*) referrer, (HeapWord*) referrer + referrer->size());
        }
        tty->print_cr("heap region containing object:");
        _heap->heap_region_containing(o)->print();
        tty->print_cr("heap region containing referrer:");
        _heap->heap_region_containing(p)->print();
        tty->print_cr("heap region containing forwardee:");
        _heap->heap_region_containing(oopDesc::bs()->read_barrier(o))->print();
      }
      assert(o->is_oop(), "oop must be an oop");
      assert(Metaspace::contains(o->klass()), "klass pointer must go to metaspace");
      if (! oopDesc::unsafe_equals(o, oopDesc::bs()->read_barrier(o))) {
        tty->print_cr("oop has forwardee: p: " PTR_FORMAT " (%s), o = " PTR_FORMAT " (%s), new-o: " PTR_FORMAT " (%s)",
                      p2i(p),
                      BOOL_TO_STR(_heap->in_collection_set(p)),
                      p2i(o),
                      BOOL_TO_STR(_heap->in_collection_set(o)),
                      p2i((HeapWord*) oopDesc::bs()->read_barrier(o)),
                      BOOL_TO_STR(_heap->in_collection_set(oopDesc::bs()->read_barrier(o))));
        tty->print_cr("oop class: %s", o->klass()->internal_name());
      }
      assert(oopDesc::unsafe_equals(o, oopDesc::bs()->read_barrier(o)), "oops must not be forwarded");
      assert(! _heap->in_collection_set(o), "references must not point to dirty heap regions");
      assert(_heap->is_marked_complete(o), "live oops must be marked current");
    }
  }

public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }

  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

void ShenandoahHeap::verify_heap_after_marking() {
  verify_heap_size_consistency();

  log_trace(gc)("verifying heap after marking");

  VerifyAfterMarkingOopClosure cl;
  roots_iterate(&cl);
  ObjectToOopClosure objs(&cl);
  object_iterate(&objs);
}

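// A humongous object occupies one humongous-start region plus zero or more
// humongous-continuation regions; reclamation walks that whole run, starting
// at the start region, and recycles each region in turn.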
void ShenandoahHeap::reclaim_humongous_region_at(ShenandoahHeapRegion* r) {
  assert(r->is_humongous_start(), "reclaim regions starting with the first one");

  oop humongous_obj = oop(r->bottom() + BrooksPointer::word_size());
  size_t size = humongous_obj->size() + BrooksPointer::word_size();
  size_t required_regions = ShenandoahHumongous::required_regions(size * HeapWordSize);
  size_t index = r->region_number();

  assert(!r->has_live(), "liveness must be zero");

  for (size_t i = 0; i < required_regions; i++) {
    ShenandoahHeapRegion* region = _ordered_regions->get(index++);

    assert((region->is_humongous_start() || region->is_humongous_continuation()),
           "expect correct humongous start or continuation");

    if (log_is_enabled(Debug, gc, humongous)) {
      log_debug(gc, humongous)("reclaiming " SIZE_FORMAT " humongous regions for object of size: " SIZE_FORMAT " words", required_regions, size);
      ResourceMark rm;
      outputStream* out = Log(gc, humongous)::debug_stream();
      region->print_on(out);
    }

    region->recycle();
    ShenandoahHeap::heap()->decrease_used(ShenandoahHeapRegion::region_size_bytes());
  }
}

class ShenandoahReclaimHumongousRegionsClosure : public ShenandoahHeapRegionClosure {
  bool doHeapRegion(ShenandoahHeapRegion* r) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();

    if (r->is_humongous_start()) {
      oop humongous_obj = oop(r->bottom() + BrooksPointer::word_size());
      if (! heap->is_marked_complete(humongous_obj)) {
        heap->reclaim_humongous_region_at(r);
      }
    }
    return false;
  }
};

#ifdef ASSERT
class CheckCollectionSetClosure: public ShenandoahHeapRegionClosure {
  bool doHeapRegion(ShenandoahHeapRegion* r) {
    assert(! ShenandoahHeap::heap()->in_collection_set(r), "Should have been cleared by now");
    return false;
  }
};
#endif

void ShenandoahHeap::prepare_for_concurrent_evacuation() {
  assert(_ordered_regions->get(0)->region_number() == 0, "FIXME CHF. FIXME CHF!");

  log_develop_trace(gc)("Thread %d started prepare_for_concurrent_evacuation", Thread::current()->osthread()->thread_id());

  if (!cancelled_concgc()) {
    // Allocations might have happened before we STWed here, record peak:
    shenandoahPolicy()->record_peak_occupancy();

    recycle_dirty_regions();

    ensure_parsability(true);

    if (UseShenandoahMatrix && PrintShenandoahMatrix) {
      outputStream* log = Log(gc)::info_stream();
      connection_matrix()->print_on(log);
    }

    if (ShenandoahVerify || (UseShenandoahMatrix && VerifyShenandoahMatrix)) {
      verify_heap_reachable_at_safepoint();
    }

#ifdef ASSERT
    if (ShenandoahVerify) {
      verify_heap_after_marking();
    }
#endif

    // NOTE: This needs to be done during a stop the world pause, because
    // putting regions into the collection set concurrently with Java threads
    // will create a race. In particular, acmp could fail because when we
    // resolve the first operand, the containing region might not yet be in
    // the collection set, and thus return the original oop. When the 2nd
    // operand gets resolved, the region could be in the collection set
    // and the oop gets evacuated. If both operands have originally been
    // the same, we get false negatives.

    {
      ShenandoahHeapLock lock(this);
      _collection_set->clear();
      _free_regions->clear();

      ShenandoahReclaimHumongousRegionsClosure reclaim;
      heap_region_iterate(&reclaim);

#ifdef ASSERT
      CheckCollectionSetClosure ccsc;
      _ordered_regions->heap_region_iterate(&ccsc);
#endif

      _shenandoah_policy->choose_collection_set(_collection_set);

      _shenandoah_policy->choose_free_set(_free_regions);
    }

    _bytes_allocated_since_cm = 0;

    Universe::update_heap_info_at_gc();
  }
}

class RetireTLABClosure : public ThreadClosure {
private:
  bool _retire;

public:
  RetireTLABClosure(bool retire) : _retire(retire) {
  }

  void do_thread(Thread* thread) {
    thread->gclab().make_parsable(_retire);
  }
};

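// Bring the heap into a linearly parsable state: retire or fill all outstanding
// TLABs and GCLABs so heap walkers do not trip over unformatted LAB tails.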
void ShenandoahHeap::ensure_parsability(bool retire_tlabs) {
  if (UseTLAB) {
    CollectedHeap::ensure_parsability(retire_tlabs);
    RetireTLABClosure cl(retire_tlabs);
    Threads::threads_do(&cl);
  }
}

class ShenandoahEvacuateUpdateRootsClosure: public ExtendedOopClosure {
private:
  ShenandoahHeap* _heap;
  Thread* _thread;
public:
  ShenandoahEvacuateUpdateRootsClosure() :
    _heap(ShenandoahHeap::heap()), _thread(Thread::current()) {
  }

private:
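  // For each root slot: if the referent is in the collection set and not yet
  // forwarded, race with other GC threads to evacuate it, then store the
  // to-space pointer back into the slot.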
  template <class T>
  void do_oop_work(T* p) {
    assert(_heap->is_evacuation_in_progress(), "Only do this when evacuation is in progress");

    T o = oopDesc::load_heap_oop(p);
    if (! oopDesc::is_null(o)) {
      oop obj = oopDesc::decode_heap_oop_not_null(o);
      if (_heap->in_collection_set(obj)) {
        assert(_heap->is_marked_complete(obj), "only evacuate marked objects %d %d",
               _heap->is_marked_complete(obj), _heap->is_marked_complete(ShenandoahBarrierSet::resolve_oop_static_not_null(obj)));
        oop resolved = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
        if (oopDesc::unsafe_equals(resolved, obj)) {
          bool evac;
          resolved = _heap->evacuate_object(obj, _thread, evac);
        }
        oopDesc::encode_store_heap_oop(p, resolved);
      }
    }
  }

public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }
  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

class ShenandoahEvacuateUpdateRootsTask : public AbstractGangTask {
  ShenandoahRootEvacuator* _rp;
public:
  ShenandoahEvacuateUpdateRootsTask(ShenandoahRootEvacuator* rp) :
    AbstractGangTask("Shenandoah evacuate and update roots"),
    _rp(rp)
  {
    // Nothing else to do.
  }

  void work(uint worker_id) {
    ShenandoahEvacuateUpdateRootsClosure cl;
    MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);

    _rp->process_evacuate_roots(&cl, &blobsCl, worker_id);
  }
};

class ShenandoahFixRootsTask : public AbstractGangTask {
  ShenandoahRootEvacuator* _rp;
public:
  ShenandoahFixRootsTask(ShenandoahRootEvacuator* rp) :
    AbstractGangTask("Shenandoah update roots"),
    _rp(rp)
  {
    // Nothing else to do.
  }

  void work(uint worker_id) {
    SCMUpdateRefsClosure cl;
    MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);

    _rp->process_evacuate_roots(&cl, &blobsCl, worker_id);
  }
};

void ShenandoahHeap::evacuate_and_update_roots() {
  COMPILER2_PRESENT(DerivedPointerTable::clear());

  assert(SafepointSynchronize::is_at_safepoint(), "Only iterate roots while world is stopped");

  {
    ShenandoahRootEvacuator rp(this, workers()->active_workers(), ShenandoahCollectorPolicy::init_evac);
    ShenandoahEvacuateUpdateRootsTask roots_task(&rp);
    workers()->run_task(&roots_task);
  }

  COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

  if (cancelled_concgc()) {
    // If initial evacuation has been cancelled, we need to update all references
    // after all workers have finished. Otherwise we might run into the following problem:
    // GC thread 1 cannot allocate anymore, thus evacuation fails and leaves a from-space
    // pointer to object X in a root oop*. GC thread 2 then evacuates the same object X
    // to to-space, which leaves a truly dangling from-space reference in the first root
    // oop*. This must not happen.
    // clear() and update_pointers() must always be called in pairs,
    // and cannot nest with the clear()/update_pointers() above.
    COMPILER2_PRESENT(DerivedPointerTable::clear());
    ShenandoahRootEvacuator rp(this, workers()->active_workers(), ShenandoahCollectorPolicy::init_evac);
    ShenandoahFixRootsTask update_roots_task(&rp);
    workers()->run_task(&update_roots_task);
    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
  }

#ifdef ASSERT
  {
    AssertToSpaceClosure cl;
    CodeBlobToOopClosure code_cl(&cl, !CodeBlobToOopClosure::FixRelocations);
    ShenandoahRootEvacuator rp(this, 1);
    rp.process_evacuate_roots(&cl, &code_cl, 0);
  }
#endif
}

void ShenandoahHeap::do_evacuation() {
  parallel_evacuate();

  if (ShenandoahVerify && ! cancelled_concgc()) {
    VM_ShenandoahVerifyHeapAfterEvacuation verify_after_evacuation;
    if (Thread::current()->is_VM_thread()) {
      verify_after_evacuation.doit();
    } else {
      VMThread::execute(&verify_after_evacuation);
    }
  }
}

void ShenandoahHeap::parallel_evacuate() {
  log_develop_trace(gc)("starting parallel_evacuate");

  _shenandoah_policy->record_phase_start(ShenandoahCollectorPolicy::conc_evac);

  if (log_is_enabled(Trace, gc, region)) {
    ResourceMark rm;
    outputStream *out = Log(gc, region)::trace_stream();
    out->print("Printing all available regions");
    print_heap_regions(out);
  }

  if (log_is_enabled(Trace, gc, cset)) {
    ResourceMark rm;
    outputStream *out = Log(gc, cset)::trace_stream();
    out->print("Printing collection set which contains " SIZE_FORMAT " regions:\n", _collection_set->count());
    _collection_set->print(out);

    out->print("Printing free set which contains " SIZE_FORMAT " regions:\n", _free_regions->count());
    _free_regions->print(out);
  }

  ParallelEvacuationTask evacuationTask = ParallelEvacuationTask(this, _collection_set);

  workers()->run_task(&evacuationTask);

  if (log_is_enabled(Trace, gc, cset)) {
    ResourceMark rm;
    outputStream *out = Log(gc, cset)::trace_stream();
    out->print("Printing postgc collection set which contains " SIZE_FORMAT " regions:\n",
               _collection_set->count());
    _collection_set->print(out);

    out->print("Printing postgc free regions which contain " SIZE_FORMAT " free regions:\n",
               _free_regions->count());
    _free_regions->print(out);
  }

  if (log_is_enabled(Trace, gc, region)) {
    ResourceMark rm;
    outputStream *out = Log(gc, region)::trace_stream();
    out->print_cr("all regions after evacuation:");
    print_heap_regions(out);
  }

  _shenandoah_policy->record_phase_end(ShenandoahCollectorPolicy::conc_evac);
}

class VerifyEvacuationClosure: public ExtendedOopClosure {
private:
  ShenandoahHeap*  _heap;
  ShenandoahHeapRegion* _from_region;

public:
  VerifyEvacuationClosure(ShenandoahHeapRegion* from_region) :
    _heap(ShenandoahHeap::heap()), _from_region(from_region) { }

private:
  template <class T>
  inline void do_oop_work(T* p) {
    oop heap_oop = oopDesc::load_decode_heap_oop(p);
    if (! oopDesc::is_null(heap_oop)) {
      guarantee(! _from_region->is_in(heap_oop), "no references to from-region allowed after evacuation: " PTR_FORMAT, p2i((HeapWord*) heap_oop));
    }
  }

public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }

  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

void ShenandoahHeap::roots_iterate(OopClosure* cl) {
  assert(SafepointSynchronize::is_at_safepoint(), "Only iterate roots while world is stopped");

  CodeBlobToOopClosure blobsCl(cl, false);
  CLDToOopClosure cldCl(cl);

  ShenandoahRootProcessor rp(this, 1);
  rp.process_all_roots(cl, NULL, &cldCl, &blobsCl, 0);
}

void ShenandoahHeap::verify_evacuation(ShenandoahHeapRegion* from_region) {
  VerifyEvacuationClosure rootsCl(from_region);
  roots_iterate(&rootsCl);
}

bool ShenandoahHeap::supports_tlab_allocation() const {
  return true;
}

size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
  size_t idx = _free_regions->current_index();
  ShenandoahHeapRegion* current = _free_regions->get_or_null(idx);
  if (current == NULL) {
    return 0;
  } else if (current->free() >= MinTLABSize) {
    // Current region has enough space left, can use it.
    return current->free();
  } else {
    // No more space in current region, peek next region.
    return _free_regions->unsafe_peek_next_no_humongous();
  }
}

size_t ShenandoahHeap::max_tlab_size() const {
  return ShenandoahHeapRegion::region_size_bytes();
}

1455 class ResizeGCLABClosure : public ThreadClosure {
1456 public:
1457   void do_thread(Thread* thread) {
1458     thread->gclab().resize();
1459   }
1460 };
1461 
1462 void ShenandoahHeap::resize_all_tlabs() {
1463   CollectedHeap::resize_all_tlabs();
1464 
1465   ResizeGCLABClosure cl;
1466   Threads::threads_do(&cl);
1467 }
1468 
1469 class AccumulateStatisticsGCLABClosure : public ThreadClosure {
1470 public:
1471   void do_thread(Thread* thread) {
1472     thread->gclab().accumulate_statistics();
1473     thread->gclab().initialize_statistics();
1474   }
1475 };
1476 
1477 void ShenandoahHeap::accumulate_statistics_all_gclabs() {
1478   AccumulateStatisticsGCLABClosure cl;
1479   Threads::threads_do(&cl);
1480 }
1481 
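// Shenandoah has no card table, so store barriers for TLAB allocations and
// initializing stores can be elided, and no card mark needs to follow stores.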
1482 bool  ShenandoahHeap::can_elide_tlab_store_barriers() const {
1483   return true;
1484 }
1485 
1486 oop ShenandoahHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) {
1487   // Overridden to do nothing.
1488   return new_obj;
1489 }
1490 
1491 bool  ShenandoahHeap::can_elide_initializing_store_barrier(oop new_obj) {
1492   return true;
1493 }
1494 
1495 bool ShenandoahHeap::card_mark_must_follow_store() const {
1496   return false;
1497 }
1498 
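// External GC requests: explicit (user-requested) GCs run a full GC on the
// concurrent GC thread unless -XX:+DisableExplicitGC; allocation failures
// force a full GC that also clears soft references.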
1499 void ShenandoahHeap::collect(GCCause::Cause cause) {
1500   assert(cause != GCCause::_gc_locker, "no JNI critical callback");
1501   if (GCCause::is_user_requested_gc(cause)) {
1502     if (! DisableExplicitGC) {
1503       _concurrent_gc_thread->do_full_gc(cause);
1504     }
1505   } else if (cause == GCCause::_allocation_failure) {
1506     collector_policy()->set_should_clear_all_soft_refs(true);
1507     _concurrent_gc_thread->do_full_gc(cause);
1508   }
1509 }
1510 
void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {
  // Full GCs are scheduled via ShenandoahHeap::collect() and executed by the
  // concurrent GC thread; there is nothing to do here.
}
1514 
1515 AdaptiveSizePolicy* ShenandoahHeap::size_policy() {
1516   Unimplemented();
1517   return NULL;
1518 
1519 }
1520 
1521 CollectorPolicy* ShenandoahHeap::collector_policy() const {
1522   return _shenandoah_policy;
1523 }
1524 
1525 
1526 HeapWord* ShenandoahHeap::block_start(const void* addr) const {
1527   Space* sp = heap_region_containing(addr);
1528   if (sp != NULL) {
1529     return sp->block_start(addr);
1530   }
1531   return NULL;
1532 }
1533 
1534 size_t ShenandoahHeap::block_size(const HeapWord* addr) const {
1535   Space* sp = heap_region_containing(addr);
1536   assert(sp != NULL, "block_size of address outside of heap");
1537   return sp->block_size(addr);
1538 }
1539 
1540 bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
1541   Space* sp = heap_region_containing(addr);
1542   return sp->block_is_obj(addr);
1543 }
1544 
1545 jlong ShenandoahHeap::millis_since_last_gc() {
1546   return 0;
1547 }
1548 
1549 void ShenandoahHeap::prepare_for_verify() {
1550   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
1551     ensure_parsability(false);
1552   }
1553 }
1554 
1555 void ShenandoahHeap::print_gc_threads_on(outputStream* st) const {
1556   workers()->print_worker_threads_on(st);
1557 }
1558 
1559 void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
1560   workers()->threads_do(tcl);
1561 }
1562 
1563 void ShenandoahHeap::print_tracing_info() const {
1564   if (log_is_enabled(Info, gc, stats)) {
1565     ResourceMark rm;
1566     outputStream* out = Log(gc, stats)::info_stream();
1567     _shenandoah_policy->print_tracing_info(out);
1568   }
1569 }
1570 
1571 class ShenandoahVerifyRootsClosure: public ExtendedOopClosure {
1572 private:
1573   ShenandoahHeap*  _heap;
1574   VerifyOption     _vo;
1575   bool             _failures;
1576 public:
1577   // _vo == UsePrevMarking -> use "prev" marking information,
1578   // _vo == UseNextMarking -> use "next" marking information,
1579   // _vo == UseMarkWord    -> use mark word from object header.
1580   ShenandoahVerifyRootsClosure(VerifyOption vo) :
1581     _heap(ShenandoahHeap::heap()),
1582     _vo(vo),
1583     _failures(false) { }
1584 
1585   bool failures() { return _failures; }
1586 
1587 private:
1588   template <class T>
1589   inline void do_oop_work(T* p) {
1590     oop obj = oopDesc::load_decode_heap_oop(p);
1591     if (! oopDesc::is_null(obj) && ! obj->is_oop()) {
1592       { // Just for debugging.
1593         tty->print_cr("Root location "PTR_FORMAT
1594                       "verified "PTR_FORMAT, p2i(p), p2i((void*) obj));
1595         //      obj->print_on(tty);
1596       }
1597     }
1598     guarantee(obj->is_oop_or_null(), "is oop or null");
1599   }
1600 
1601 public:
1602   void do_oop(oop* p)       {
1603     do_oop_work(p);
1604   }
1605 
1606   void do_oop(narrowOop* p) {
1607     do_oop_work(p);
1608   }
1609 
1610 };
1611 
1612 class ShenandoahVerifyHeapClosure: public ObjectClosure {
1613 private:
1614   ShenandoahVerifyRootsClosure _rootsCl;
1615 public:
  ShenandoahVerifyHeapClosure(ShenandoahVerifyRootsClosure rc) :
    _rootsCl(rc) {}
1618 
1619   void do_object(oop p) {
1620     _rootsCl.do_oop(&p);
1621   }
1622 };
1623 
1624 void ShenandoahHeap::verify(VerifyOption vo) {
1625   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
1626 
1627     ShenandoahVerifyRootsClosure rootsCl(vo);
1628 
1629     assert(Thread::current()->is_VM_thread(),
1630            "Expected to be executed serially by the VM thread at this point");
1631 
1632     roots_iterate(&rootsCl);
1633 
1634     bool failures = rootsCl.failures();
1635     log_trace(gc)("verify failures: %s", BOOL_TO_STR(failures));
1636 
1637     ShenandoahVerifyHeapClosure heapCl(rootsCl);
1638 
1639     object_iterate(&heapCl);
1640     // TODO: Implement rest of it.
1641   } else {
1642     tty->print("(SKIPPING roots, heapRegions, remset) ");
1643   }
1644 }
1645 size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
1646   return _free_regions->capacity();
1647 }
1648 
1649 class ShenandoahIterateObjectClosureRegionClosure: public ShenandoahHeapRegionClosure {
1650   ObjectClosure* _cl;
1651 public:
1652   ShenandoahIterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {}
1653   bool doHeapRegion(ShenandoahHeapRegion* r) {
1654     ShenandoahHeap::heap()->marked_object_iterate(r, _cl);
1655     return false;
1656   }
1657 };
1658 
1659 void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
1660   ShenandoahIterateObjectClosureRegionClosure blk(cl);
1661   heap_region_iterate(&blk, false, true);
1662 }
1663 
1664 class ShenandoahSafeObjectIterateAdjustPtrsClosure : public MetadataAwareOopClosure {
1665 private:
1666   ShenandoahHeap* _heap;
1667 
1668 public:
1669   ShenandoahSafeObjectIterateAdjustPtrsClosure() : _heap(ShenandoahHeap::heap()) {}
1670 
1671 private:
1672   template <class T>
1673   inline void do_oop_work(T* p) {
1674     T o = oopDesc::load_heap_oop(p);
1675     if (!oopDesc::is_null(o)) {
1676       oop obj = oopDesc::decode_heap_oop_not_null(o);
1677       oopDesc::encode_store_heap_oop(p, BrooksPointer::forwardee(obj));
1678     }
1679   }
1680 public:
1681   void do_oop(oop* p) {
1682     do_oop_work(p);
1683   }
1684   void do_oop(narrowOop* p) {
1685     do_oop_work(p);
1686   }
1687 };
1688 
1689 class ShenandoahSafeObjectIterateAndUpdate : public ObjectClosure {
1690 private:
1691   ObjectClosure* _cl;
1692 public:
1693   ShenandoahSafeObjectIterateAndUpdate(ObjectClosure *cl) : _cl(cl) {}
1694 
1695   virtual void do_object(oop obj) {
1696     assert (oopDesc::unsafe_equals(obj, BrooksPointer::forwardee(obj)),
1697             "avoid double-counting: only non-forwarded objects here");
1698 
1699     // Fix up the ptrs.
1700     ShenandoahSafeObjectIterateAdjustPtrsClosure adjust_ptrs;
1701     obj->oop_iterate(&adjust_ptrs);
1702 
    // References are fixed up; hand the object to the wrapped closure now:
1704     _cl->do_object(obj);
1705   }
1706 };
1707 
1708 void ShenandoahHeap::safe_object_iterate(ObjectClosure* cl) {
1709   assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1710 
  // Safe iteration must only visit objects whose references are correct.
  // That is why we skip dirty regions, which hold stale from-space copies,
  // and fix up the pointers in the objects we do return.
1714 
1715   ShenandoahSafeObjectIterateAndUpdate safe_cl(cl);
1716   ShenandoahIterateObjectClosureRegionClosure blk(&safe_cl);
1717   heap_region_iterate(&blk,
1718                       /* skip_dirty_regions = */ true,
1719                       /* skip_humongous_continuations = */ true);
1720 
1721   _need_update_refs = false; // already updated the references
1722 }
1723 
1724 // Apply blk->doHeapRegion() on all committed regions in address order,
1725 // terminating the iteration early if doHeapRegion() returns true.
1726 void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk, bool skip_dirty_regions, bool skip_humongous_continuation) const {
1727   for (size_t i = 0; i < _num_regions; i++) {
1728     ShenandoahHeapRegion* current  = _ordered_regions->get(i);
1729     if (skip_humongous_continuation && current->is_humongous_continuation()) {
1730       continue;
1731     }
1732     if (skip_dirty_regions && in_collection_set(current)) {
1733       continue;
1734     }
1735     if (blk->doHeapRegion(current)) {
1736       return;
1737     }
1738   }
1739 }
1740 
1741 class ClearLivenessClosure : public ShenandoahHeapRegionClosure {
1742   ShenandoahHeap* sh;
1743 public:
1744   ClearLivenessClosure(ShenandoahHeap* heap) : sh(heap) { }
1745 
1746   bool doHeapRegion(ShenandoahHeapRegion* r) {
1747     r->clear_live_data();
1748     sh->set_next_top_at_mark_start(r->bottom(), r->top());
1749     return false;
1750   }
1751 };
1752 
1753 void ShenandoahHeap::start_concurrent_marking() {
1754 
1755   shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::accumulate_stats);
1756   accumulate_statistics_all_tlabs();
1757   shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::accumulate_stats);
1758 
1759   set_concurrent_mark_in_progress(true);
1760   // We need to reset all TLABs because we'd lose marks on all objects allocated in them.
1761   if (UseTLAB) {
1762     shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::make_parsable);
1763     ensure_parsability(true);
1764     shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::make_parsable);
1765   }
1766 
1767   _shenandoah_policy->record_bytes_allocated(_bytes_allocated_since_cm);
1768   _used_start_gc = used();
1769 
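  // Reset per-region liveness data and next-top-at-mark-start before the new cycle.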
1770   shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::clear_liveness);
1771   ClearLivenessClosure clc(this);
1772   heap_region_iterate(&clc);
1773   shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::clear_liveness);
1774 
1775   if (UseShenandoahMatrix) {
1776     connection_matrix()->clear_all();
1777   }
1778 
1779   // Make above changes visible to worker threads
1780   OrderAccess::fence();
1781 
1782   concurrentMark()->init_mark_roots();
1783 }
1784 
1785 class VerifyAfterEvacuationClosure : public ExtendedOopClosure {
1786 
1787   ShenandoahHeap* _sh;
1788 
1789 public:
1790   VerifyAfterEvacuationClosure() : _sh ( ShenandoahHeap::heap() ) {}
1791 
1792   template<class T> void do_oop_nv(T* p) {
1793     T heap_oop = oopDesc::load_heap_oop(p);
1794     if (!oopDesc::is_null(heap_oop)) {
1795       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
1796       guarantee(_sh->in_collection_set(obj) == (! oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj))),
1797                 "forwarded objects can only exist in dirty (from-space) regions is_dirty: %s, is_forwarded: %s obj-klass: %s, marked: %s",
1798                 BOOL_TO_STR(_sh->in_collection_set(obj)),
1799                 BOOL_TO_STR(! oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj))),
1800                 obj->klass()->external_name(),
1801                 BOOL_TO_STR(_sh->is_marked_complete(obj))
1802                 );
1803       obj = oopDesc::bs()->read_barrier(obj);
1804       guarantee(! _sh->in_collection_set(obj), "forwarded oops must not point to dirty regions");
1805       guarantee(obj->is_oop(), "is_oop");
1806       guarantee(Metaspace::contains(obj->klass()), "klass pointer must go to metaspace");
1807     }
1808   }
1809 
1810   void do_oop(oop* p)       { do_oop_nv(p); }
1811   void do_oop(narrowOop* p) { do_oop_nv(p); }
1812 
1813 };
1814 
1815 void ShenandoahHeap::verify_heap_after_evacuation() {
1816 
1817   verify_heap_size_consistency();
1818 
1819   ensure_parsability(false);
1820 
1821   VerifyAfterEvacuationClosure cl;
1822   roots_iterate(&cl);
1823 
1824   ObjectToOopClosure objs(&cl);
1825   object_iterate(&objs);
1826 
1827 }
1828 
1829 void ShenandoahHeap::swap_mark_bitmaps() {
1830   // Swap bitmaps.
1831   CMBitMap* tmp1 = _complete_mark_bit_map;
1832   _complete_mark_bit_map = _next_mark_bit_map;
1833   _next_mark_bit_map = tmp1;
1834 
1835   // Swap top-at-mark-start pointers
1836   HeapWord** tmp2 = _complete_top_at_mark_starts;
1837   _complete_top_at_mark_starts = _next_top_at_mark_starts;
1838   _next_top_at_mark_starts = tmp2;
1839 
1840   HeapWord** tmp3 = _complete_top_at_mark_starts_base;
1841   _complete_top_at_mark_starts_base = _next_top_at_mark_starts_base;
1842   _next_top_at_mark_starts_base = tmp3;
1843 }
1844 
1845 class VerifyReachableHeapClosure : public ExtendedOopClosure {
1846 private:
1847   SCMObjToScanQueue* _queue;
1848   ShenandoahHeap* _heap;
1849   CMBitMap* _map;
1850   bool _check_matrix;
1851   oop _obj;
1852 public:
  VerifyReachableHeapClosure(SCMObjToScanQueue* queue, CMBitMap* map, bool check_matrix) :
          _queue(queue), _heap(ShenandoahHeap::heap()), _map(map), _check_matrix(check_matrix), _obj(NULL) {}
1855   template <class T>
1856   void do_oop_work(T* p) {
1857     T o = oopDesc::load_heap_oop(p);
1858     if (!oopDesc::is_null(o)) {
1859       oop obj = oopDesc::decode_heap_oop_not_null(o);
1860       guarantee(check_obj_alignment(obj), "sanity");
1861 
1862       guarantee(!oopDesc::is_null(obj), "sanity");
1863       guarantee(_heap->is_in(obj), "sanity");
1864 
1865       oop forw = BrooksPointer::forwardee(obj);
1866       guarantee(!oopDesc::is_null(forw), "sanity");
1867       guarantee(_heap->is_in(forw), "sanity");
1868 
1869       guarantee(oopDesc::unsafe_equals(obj, forw), "should not be forwarded");
1870 
1871       if (_check_matrix) {
1872         size_t from_idx = _heap->heap_region_index_containing(p);
1873         size_t to_idx = _heap->heap_region_index_containing(obj);
1874         if (!_heap->connection_matrix()->is_connected(from_idx, to_idx)) {
1875           tty->print_cr("from-obj: ");
1876           _obj->print_on(tty);
1877           tty->print_cr("to-obj:");
1878           obj->print_on(tty);
1879           tty->print_cr("from-obj allocated after mark: %s", BOOL_TO_STR(_heap->allocated_after_complete_mark_start((HeapWord*) _obj)));
1880           tty->print_cr("to-obj allocated after mark: %s", BOOL_TO_STR(_heap->allocated_after_complete_mark_start((HeapWord*) obj)));
1881           tty->print_cr("from-obj marked: %s", BOOL_TO_STR(_heap->is_marked_complete(_obj)));
1882           tty->print_cr("to-obj marked: %s", BOOL_TO_STR(_heap->is_marked_complete(obj)));
1883           tty->print_cr("from-idx: " SIZE_FORMAT ", to-idx: " SIZE_FORMAT, from_idx, to_idx);
1884 
1885           oop fwd_from = BrooksPointer::forwardee(_obj);
1886           oop fwd_to = BrooksPointer::forwardee(obj);
1887           tty->print_cr("from-obj forwardee: " PTR_FORMAT, p2i(fwd_from));
1888           tty->print_cr("to-obj forwardee: " PTR_FORMAT, p2i(fwd_to));
1889           tty->print_cr("forward(from-obj) marked: %s", BOOL_TO_STR(_heap->is_marked_complete(fwd_from)));
1890           tty->print_cr("forward(to-obj) marked: %s", BOOL_TO_STR(_heap->is_marked_complete(fwd_to)));
1891           size_t fwd_from_idx = _heap->heap_region_index_containing(fwd_from);
1892           size_t fwd_to_idx = _heap->heap_region_index_containing(fwd_to);
1893           tty->print_cr("forward(from-idx): " SIZE_FORMAT ", forward(to-idx): " SIZE_FORMAT, fwd_from_idx, fwd_to_idx);
1894           tty->print_cr("forward(from) connected with forward(to)? %s", BOOL_TO_STR(_heap->connection_matrix()->is_connected(fwd_from_idx, fwd_to_idx)));
1895         }
1896         guarantee(oopDesc::unsafe_equals(ShenandoahBarrierSet::resolve_oop_static_not_null(obj), obj), "polizeilich verboten");
1897         guarantee(_heap->connection_matrix()->is_connected(from_idx, to_idx), "must be connected");
1898       }
1899 
1900       if (_map->parMark((HeapWord*) obj)) {
1901         _queue->push(SCMTask(obj));
1902       }
1903     }
1904   }
1905 
1906   void do_oop(oop* p) { do_oop_work(p); }
1907   void do_oop(narrowOop* p) { do_oop_work(p); }
1908   void set_obj(oop o) { _obj = o; }
1909 };
1910 
1911 void ShenandoahHeap::verify_heap_reachable_at_safepoint() {
1912   guarantee(SafepointSynchronize::is_at_safepoint(), "only when nothing else happens");
1913   guarantee(ShenandoahVerify || (UseShenandoahMatrix && VerifyShenandoahMatrix),
1914             "only when these are enabled, and bitmap is initialized in ShenandoahHeap::initialize");
1915 
1916   OrderAccess::fence();
1917   ensure_parsability(false);
1918 
1919   // Allocate temporary bitmap for storing marking wavefront:
1920   MemRegion mr = MemRegion(_verification_bit_map.startWord(), _verification_bit_map.endWord());
1921   _verification_bit_map.clear_range_large(mr);
1922 
1923   // Initialize a single queue
1924   SCMObjToScanQueue* q = new SCMObjToScanQueue();
1925   q->initialize();
1926 
1927   // Scan root set
1928   ShenandoahRootProcessor rp(this, 1);
1929 
1930   {
1931     VerifyReachableHeapClosure cl(q, &_verification_bit_map, false);
1932     CLDToOopClosure cld_cl(&cl);
1933     CodeBlobToOopClosure code_cl(&cl, ! CodeBlobToOopClosure::FixRelocations);
1934     rp.process_all_roots(&cl, &cl, &cld_cl, &code_cl, 0);
1935   }
1936 
1937   // Finish the scan
1938   {
1939     VerifyReachableHeapClosure cl(q, &_verification_bit_map, UseShenandoahMatrix && VerifyShenandoahMatrix);
1940     SCMTask task;
1941     while ((q->pop_buffer(task) ||
1942             q->pop_local(task) ||
1943             q->pop_overflow(task))) {
1944       oop obj = task.obj();
1945       assert(!oopDesc::is_null(obj), "must not be null");
1946       cl.set_obj(obj);
1947       obj->oop_iterate(&cl);
1948     }
1949   }
1950 
1951   // Clean up!
  delete q;
1953 }
1954 
1955 void ShenandoahHeap::stop_concurrent_marking() {
1956   assert(concurrent_mark_in_progress(), "How else could we get here?");
  if (! cancelled_concgc()) {
    // Marking finished normally: swap bitmaps so the just-completed marking
    // becomes the "complete" one, and clear any pending update-refs request.
    set_need_update_refs(false);
    swap_mark_bitmaps();
  }
1962   }
1963   set_concurrent_mark_in_progress(false);
1964 
1965   if (log_is_enabled(Trace, gc, region)) {
1966     ResourceMark rm;
1967     outputStream* out = Log(gc, region)::trace_stream();
1968     print_heap_regions(out);
1969   }
1970 
1971 }
1972 
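// Toggling concurrent marking also flips the SATB barrier queues for all
// Java threads; the second argument is the expected previous state.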
1973 void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
1974   _concurrent_mark_in_progress = in_progress ? 1 : 0;
1975   JavaThread::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
1976 }
1977 
1978 void ShenandoahHeap::set_evacuation_in_progress_concurrently(bool in_progress) {
1979   // Note: it is important to first release the _evacuation_in_progress flag here,
1980   // so that Java threads can get out of oom_during_evacuation() and reach a safepoint,
1981   // in case a VM task is pending.
1982   set_evacuation_in_progress(in_progress);
1983   MutexLocker mu(Threads_lock);
1984   JavaThread::set_evacuation_in_progress_all_threads(in_progress);
1985 }
1986 
1987 void ShenandoahHeap::set_evacuation_in_progress_at_safepoint(bool in_progress) {
1988   assert(SafepointSynchronize::is_at_safepoint(), "Only call this at safepoint");
1989   set_evacuation_in_progress(in_progress);
1990   JavaThread::set_evacuation_in_progress_all_threads(in_progress);
1991 }
1992 
1993 void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
1994   _evacuation_in_progress = in_progress ? 1 : 0;
1995   OrderAccess::fence();
1996 }
1997 
void ShenandoahHeap::verify_copy(oop p, oop c) {
  assert(! oopDesc::unsafe_equals(p, oopDesc::bs()->read_barrier(p)), "forwarded correctly");
  assert(oopDesc::unsafe_equals(oopDesc::bs()->read_barrier(p), c), "verify pointer is correct");
  if (p->klass() != c->klass()) {
    print_heap_regions();
  }
  assert(p->klass() == c->klass(), "verify class p-size: "INT32_FORMAT" c-size: "INT32_FORMAT, p->size(), c->size());
  assert(p->size() == c->size(), "verify size");
  // Object may have been locked between copy and verification
  //    assert(p->mark() == c->mark(), "verify mark");
  assert(oopDesc::unsafe_equals(c, oopDesc::bs()->read_barrier(c)), "verify only forwarded once");
}
2010 
2011 void ShenandoahHeap::oom_during_evacuation() {
2012   log_develop_trace(gc)("Out of memory during evacuation, cancel evacuation, schedule full GC by thread %d",
2013                         Thread::current()->osthread()->thread_id());
2014 
2015   // We ran out of memory during evacuation. Cancel evacuation, and schedule a full-GC.
2016   collector_policy()->set_should_clear_all_soft_refs(true);
2017   concurrent_thread()->try_set_full_gc();
2018   cancel_concgc(_oom_evacuation);
2019 
2020   if ((! Thread::current()->is_GC_task_thread()) && (! Thread::current()->is_ConcurrentGC_thread())) {
2021     assert(! Threads_lock->owned_by_self()
2022            || SafepointSynchronize::is_at_safepoint(), "must not hold Threads_lock here");
2023     log_warning(gc)("OOM during evacuation. Let Java thread wait until evacuation finishes.");
2024     while (_evacuation_in_progress) { // wait.
2025       Thread::current()->_ParkEvent->park(1);
2026     }
2027   }
2028 
2029 }
2030 
2031 HeapWord* ShenandoahHeap::tlab_post_allocation_setup(HeapWord* obj) {
  // Reserve space for the Brooks pointer word ahead of the new object and
  // point it at the object itself.
2033   HeapWord* result = obj + BrooksPointer::word_size();
2034   BrooksPointer::initialize(oop(result));
2035   return result;
2036 }
2037 
2038 uint ShenandoahHeap::oop_extra_words() {
2039   return BrooksPointer::word_size();
2040 }
2041 
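// Commit storage for num_regions additional regions and wire them into all
// region-indexed side structures: the ordered region set, the in-cset fast
// test table, both top-at-mark-start arrays, and the free set.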
2042 void ShenandoahHeap::grow_heap_by(size_t num_regions) {
2043   size_t old_num_regions = _num_regions;
2044   ensure_new_regions(num_regions);
2045   for (size_t i = 0; i < num_regions; i++) {
2046     size_t new_region_index = i + old_num_regions;
2047     HeapWord* start = ((HeapWord*) base()) + (ShenandoahHeapRegion::region_size_bytes() / HeapWordSize) * new_region_index;
2048     ShenandoahHeapRegion* new_region = new ShenandoahHeapRegion(this, start, ShenandoahHeapRegion::region_size_bytes() / HeapWordSize, new_region_index);
2049 
2050     if (log_is_enabled(Trace, gc, region)) {
2051       ResourceMark rm;
2052       outputStream* out = Log(gc, region)::trace_stream();
2053       out->print_cr("allocating new region at index: "SIZE_FORMAT, new_region_index);
2054       new_region->print_on(out);
2055     }
2056 
2057     assert(_ordered_regions->active_regions() == new_region->region_number(), "must match");
2058     _ordered_regions->add_region(new_region);
2059     _in_cset_fast_test_base[new_region_index] = false; // Not in cset
2060     _next_top_at_mark_starts_base[new_region_index] = new_region->bottom();
2061     _complete_top_at_mark_starts_base[new_region_index] = new_region->bottom();
2062 
2063     _free_regions->add_region(new_region);
2064   }
2065 }
2066 
2067 void ShenandoahHeap::ensure_new_regions(size_t new_regions) {
2068 
2069   size_t num_regions = _num_regions;
2070   size_t new_num_regions = num_regions + new_regions;
2071   assert(new_num_regions <= _max_regions, "we checked this earlier");
2072 
2073   size_t expand_size = new_regions * ShenandoahHeapRegion::region_size_bytes();
2074   log_trace(gc, region)("expanding storage by "SIZE_FORMAT_HEX" bytes, for "SIZE_FORMAT" new regions", expand_size, new_regions);
2075   bool success = _storage.expand_by(expand_size, ShenandoahAlwaysPreTouch);
2076   assert(success, "should always be able to expand by requested size");
2077 
2078   _num_regions = new_num_regions;
2079 
2080 }
2081 
2082 ShenandoahForwardedIsAliveClosure::ShenandoahForwardedIsAliveClosure() :
2083   _heap(ShenandoahHeap::heap_no_check()) {
2084 }
2085 
2086 void ShenandoahForwardedIsAliveClosure::init(ShenandoahHeap* heap) {
2087   _heap = heap;
2088 }
2089 
2090 bool ShenandoahForwardedIsAliveClosure::do_object_b(oop obj) {
2091 
2092   assert(_heap != NULL, "sanity");
2093   obj = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
2094 #ifdef ASSERT
2095   if (_heap->concurrent_mark_in_progress()) {
2096     assert(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)), "only query to-space");
2097   }
2098 #endif
2099   assert(!oopDesc::is_null(obj), "null");
2100   return _heap->is_marked_next(obj);
2101 }
2102 
2103 void ShenandoahHeap::ref_processing_init() {
2104   MemRegion mr = reserved_region();
2105 
2106   isAlive.init(ShenandoahHeap::heap());
2107   assert(_max_workers > 0, "Sanity");
2108 
  _ref_processor =
    new ReferenceProcessor(mr,                      // span
                           ParallelRefProcEnabled,  // mt processing
                           _max_workers,            // degree of mt processing
                           true,                    // mt discovery
                           _max_workers,            // degree of mt discovery
                           false,                   // reference discovery is not atomic
                           &isAlive);
2122 }
2123 
2124 size_t ShenandoahHeap::num_regions() {
2125   return _num_regions;
2126 }
2127 
2128 size_t ShenandoahHeap::max_regions() {
2129   return _max_regions;
2130 }
2131 
2132 GCTracer* ShenandoahHeap::tracer() {
2133   return shenandoahPolicy()->tracer();
2134 }
2135 
2136 size_t ShenandoahHeap::tlab_used(Thread* thread) const {
2137   return _free_regions->used();
2138 }
2139 
2140 void ShenandoahHeap::cancel_concgc(GCCause::Cause cause) {
2141   if (try_cancel_concgc()) {
2142     log_info(gc)("Cancelling concurrent GC: %s", GCCause::to_string(cause));
2143     _shenandoah_policy->report_concgc_cancelled();
2144   }
2145 }
2146 
2147 void ShenandoahHeap::cancel_concgc(ShenandoahCancelCause cause) {
2148   if (try_cancel_concgc()) {
2149     log_info(gc)("Cancelling concurrent GC: %s", cancel_cause_to_string(cause));
2150     _shenandoah_policy->report_concgc_cancelled();
2151   }
2152 }
2153 
2154 const char* ShenandoahHeap::cancel_cause_to_string(ShenandoahCancelCause cause) {
2155   switch (cause) {
2156     case _oom_evacuation:
2157       return "Out of memory for evacuation";
2158     case _vm_stop:
2159       return "Stopping VM";
2160     default:
2161       return "Unknown";
2162   }
2163 }
2164 
2165 uint ShenandoahHeap::max_workers() {
2166   return _max_workers;
2167 }
2168 
2169 void ShenandoahHeap::stop() {
2170   // The shutdown sequence should be able to terminate when GC is running.
2171 
2172   // Step 1. Notify control thread that we are in shutdown.
2173   // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown.
2174   // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
2175   _concurrent_gc_thread->prepare_for_graceful_shutdown();
2176 
2177   // Step 2. Notify GC workers that we are cancelling GC.
2178   cancel_concgc(_vm_stop);
2179 
2180   // Step 3. Wait until GC worker exits normally.
2181   _concurrent_gc_thread->stop();
2182 }
2183 
2184 void ShenandoahHeap::unload_classes_and_cleanup_tables() {
2185   ShenandoahForwardedIsAliveClosure is_alive;
2186   // Unload classes and purge SystemDictionary.
2187   bool purged_class = SystemDictionary::do_unloading(&is_alive, true);
2188   ParallelCleaningTask unlink_task(&is_alive, true, true, _workers->active_workers(), purged_class);
2189   _workers->run_task(&unlink_task);
2190   ClassLoaderDataGraph::purge();
2191 }
2192 
2193 void ShenandoahHeap::set_need_update_refs(bool need_update_refs) {
2194   _need_update_refs = need_update_refs;
2195 }
2196 
// FIXME: This should live in ShenandoahHeapRegionSet.
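// Returns the next non-humongous region after r, used when choosing
// compaction targets.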
2198 ShenandoahHeapRegion* ShenandoahHeap::next_compaction_region(const ShenandoahHeapRegion* r) {
2199   size_t region_idx = r->region_number() + 1;
2200   ShenandoahHeapRegion* next = _ordered_regions->get(region_idx);
2201   guarantee(next->region_number() == region_idx, "region number must match");
2202   while (next->is_humongous()) {
2203     region_idx = next->region_number() + 1;
2204     next = _ordered_regions->get(region_idx);
2205     guarantee(next->region_number() == region_idx, "region number must match");
2206   }
2207   return next;
2208 }
2209 
2210 void ShenandoahHeap::set_region_in_collection_set(size_t region_index, bool b) {
2211   _in_cset_fast_test_base[region_index] = b;
2212 }
2213 
2214 ShenandoahMonitoringSupport* ShenandoahHeap::monitoring_support() {
2215   return _monitoring_support;
2216 }
2217 
2218 CMBitMap* ShenandoahHeap::complete_mark_bit_map() {
2219   return _complete_mark_bit_map;
2220 }
2221 
2222 CMBitMap* ShenandoahHeap::next_mark_bit_map() {
2223   return _next_mark_bit_map;
2224 }
2225 
2226 void ShenandoahHeap::add_free_region(ShenandoahHeapRegion* r) {
2227   _free_regions->add_region(r);
2228 }
2229 
2230 void ShenandoahHeap::clear_free_regions() {
2231   _free_regions->clear();
2232 }
2233 
2234 address ShenandoahHeap::in_cset_fast_test_addr() {
2235   return (address) (ShenandoahHeap::heap()->_in_cset_fast_test);
2236 }
2237 
2238 address ShenandoahHeap::cancelled_concgc_addr() {
2239   return (address) &(ShenandoahHeap::heap()->_cancelled_concgc);
2240 }
2241 
2242 void ShenandoahHeap::clear_cset_fast_test() {
2243   assert(_in_cset_fast_test_base != NULL, "sanity");
2244   memset(_in_cset_fast_test_base, false,
2245          _in_cset_fast_test_length * sizeof(bool));
2246 }
2247 
2248 size_t ShenandoahHeap::conservative_max_heap_alignment() {
2249   return ShenandoahMaxRegionSize;
2250 }
2251 
2252 size_t ShenandoahHeap::bytes_allocated_since_cm() {
2253   return _bytes_allocated_since_cm;
2254 }
2255 
2256 void ShenandoahHeap::set_bytes_allocated_since_cm(size_t bytes) {
2257   _bytes_allocated_since_cm = bytes;
2258 }
2259 
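// The top-at-mark-start arrays are indexed by region number, which is
// recovered from a region base address via the region size shift.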
2260 void ShenandoahHeap::set_next_top_at_mark_start(HeapWord* region_base, HeapWord* addr) {
2261   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_shift();
2262   _next_top_at_mark_starts[index] = addr;
2263 }
2264 
2265 HeapWord* ShenandoahHeap::next_top_at_mark_start(HeapWord* region_base) {
2266   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_shift();
2267   return _next_top_at_mark_starts[index];
2268 }
2269 
2270 void ShenandoahHeap::set_complete_top_at_mark_start(HeapWord* region_base, HeapWord* addr) {
2271   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_shift();
2272   _complete_top_at_mark_starts[index] = addr;
2273 }
2274 
2275 HeapWord* ShenandoahHeap::complete_top_at_mark_start(HeapWord* region_base) {
2276   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_shift();
2277   return _complete_top_at_mark_starts[index];
2278 }
2279 
2280 void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
2281   _full_gc_in_progress = in_progress;
2282 }
2283 
2284 bool ShenandoahHeap::is_full_gc_in_progress() const {
2285   return _full_gc_in_progress;
2286 }
2287 
2288 void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
2289   _update_refs_in_progress = in_progress;
2290 }
2291 
2292 bool ShenandoahHeap::is_update_refs_in_progress() const {
2293   return _update_refs_in_progress;
2294 }
2295 
2296 class NMethodOopInitializer : public OopClosure {
2297 private:
2298   ShenandoahHeap* _heap;
2299 public:
2300   NMethodOopInitializer() : _heap(ShenandoahHeap::heap()) {
2301   }
2302 
2303 private:
2304   template <class T>
2305   inline void do_oop_work(T* p) {
2306     T o = oopDesc::load_heap_oop(p);
2307     if (! oopDesc::is_null(o)) {
2308       oop obj1 = oopDesc::decode_heap_oop_not_null(o);
2309       oop obj2 = oopDesc::bs()->write_barrier(obj1);
2310       if (! oopDesc::unsafe_equals(obj1, obj2)) {
2311         oopDesc::encode_store_heap_oop(p, obj2);
2312       }
2313     }
2314   }
2315 
2316 public:
2317   void do_oop(oop* o) {
2318     do_oop_work(o);
2319   }
2320   void do_oop(narrowOop* o) {
2321     do_oop_work(o);
2322   }
2323 };
2324 
2325 void ShenandoahHeap::register_nmethod(nmethod* nm) {
2326   NMethodOopInitializer init;
2327   nm->oops_do(&init);
2328   nm->fix_oop_relocations();
2329 }
2330 
2331 void ShenandoahHeap::unregister_nmethod(nmethod* nm) {
2332 }
2333 
2334 void ShenandoahHeap::pin_object(oop o) {
2335   heap_region_containing(o)->pin();
2336 }
2337 
2338 void ShenandoahHeap::unpin_object(oop o) {
2339   heap_region_containing(o)->unpin();
2340 }
2341 
2342 
2343 GCTimer* ShenandoahHeap::gc_timer() const {
2344   return _gc_timer;
2345 }
2346 
2347 class ShenandoahCountGarbageClosure : public ShenandoahHeapRegionClosure {
2348 private:
2349   size_t _garbage;
2350 public:
2351   ShenandoahCountGarbageClosure() : _garbage(0) {
2352   }
2353 
2354   bool doHeapRegion(ShenandoahHeapRegion* r) {
2355     if (! r->is_humongous() && ! r->is_pinned() && ! r->in_collection_set()) {
2356       _garbage += r->garbage();
2357     }
2358     return false;
2359   }
2360 
2361   size_t garbage() {
2362     return _garbage;
2363   }
2364 };
2365 
2366 size_t ShenandoahHeap::garbage() {
2367   ShenandoahCountGarbageClosure cl;
2368   heap_region_iterate(&cl);
2369   return cl.garbage();
2370 }
2371 
2372 ShenandoahConnectionMatrix* ShenandoahHeap::connection_matrix() {
2373   return _connection_matrix;
2374 }
2375 
2376 ShenandoahPartialGC* ShenandoahHeap::partial_gc() {
2377   return _partial_gc;
2378 }
2379 
2380 void ShenandoahHeap::do_partial_collection() {
2381   partial_gc()->do_partial_collection();
2382 }
2383 
2384 template<class T>
2385 class ShenandoahUpdateHeapRefsTask : public AbstractGangTask {
2386 private:
2387   T cl;
2388   ShenandoahHeap* _heap;
2389   ShenandoahHeapRegionSet* _regions;
2390 
2391 public:
2392   ShenandoahUpdateHeapRefsTask(ShenandoahHeapRegionSet* regions) :
2393     AbstractGangTask("Concurrent Update References Task"),
2394     cl(T()),
2395     _heap(ShenandoahHeap::heap()),
2396     _regions(regions) {
2397   }
2398 
2399   void work(uint worker_id) {
2400     ShenandoahHeapRegion* r = _regions->claim_next();
2401     while (r != NULL) {
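      // Two cases: regions in the collection set hold only stale from-space
      // copies, so their completed-mark bitmap range below TAMS is simply
      // reset; all other non-empty regions get their references updated by
      // walking the marked objects.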
2402       if (_heap->in_collection_set(r)) {
2403         HeapWord* bottom = r->bottom();
2404         HeapWord* top = _heap->complete_top_at_mark_start(r->bottom());
2405         if (top > bottom) {
2406           _heap->complete_mark_bit_map()->clear_range_large(MemRegion(bottom, top));
2407         }
2408       } else {
2409         if (!r->is_empty()) {
2410           _heap->marked_object_oop_safe_iterate(r, &cl);
2411         }
2412       }
2413       if (_heap->cancelled_concgc()) {
2414         return;
2415       }
2416       r = _regions->claim_next();
2417     }
2418   }
2419 };
2420 
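// Pick the closure flavor once per cycle: the matrix variant additionally
// maintains the connection matrix while updating references.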
2421 void ShenandoahHeap::update_heap_references(ShenandoahHeapRegionSet* update_regions) {
2422   if (UseShenandoahMatrix) {
2423     ShenandoahUpdateHeapRefsTask<ShenandoahUpdateHeapRefsMatrixClosure> task(update_regions);
2424     workers()->run_task(&task);
2425   } else {
2426     ShenandoahUpdateHeapRefsTask<ShenandoahUpdateHeapRefsClosure> task(update_regions);
2427     workers()->run_task(&task);
2428   }
2429 }
2430 
2431 void ShenandoahHeap::concurrent_update_heap_references() {
2432   _shenandoah_policy->record_phase_start(ShenandoahCollectorPolicy::conc_update_refs);
2433   ShenandoahHeapRegionSet* update_regions = regions();
2434   update_regions->clear_current_index();
2435   update_heap_references(update_regions);
2436   _shenandoah_policy->record_phase_end(ShenandoahCollectorPolicy::conc_update_refs);
2437 }
2438 
2439 void ShenandoahHeap::prepare_update_refs() {
2440   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
2441   set_evacuation_in_progress_at_safepoint(false);
2442   set_update_refs_in_progress(true);
2443   ensure_parsability(true);
2444   if (UseShenandoahMatrix) {
2445     connection_matrix()->clear_all();
2446   }
2447   for (uint i = 0; i < _num_regions; i++) {
2448     ShenandoahHeapRegion* r = _ordered_regions->get(i);
2449     r->set_concurrent_iteration_safe_limit(r->top());
2450   }
2451 }
2452 
2453 void ShenandoahHeap::finish_update_refs() {
2454   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
2455 
2456   if (cancelled_concgc()) {
2457     // Finish updating references where we left off.
2458     clear_cancelled_concgc();
2459     ShenandoahHeapRegionSet* update_regions = regions();
2460     update_heap_references(update_regions);
2461   }
2462 
2463   assert(! cancelled_concgc(), "Should have been done right before");
2464   concurrentMark()->update_roots(ShenandoahCollectorPolicy::final_update_refs_roots);
2465 
2466   // Allocations might have happened before we STWed here, record peak:
2467   shenandoahPolicy()->record_peak_occupancy();
2468 
2469   recycle_dirty_regions();
2470   set_need_update_refs(false);
2471 
2472   if (ShenandoahVerify) {
2473     verify_update_refs();
2474   }
2475 
2476   {
2477     // Rebuild the free set
2478     ShenandoahHeapLock hl(this);
2479     _free_regions->clear();
2480     size_t end = _ordered_regions->active_regions();
2481     for (size_t i = 0; i < end; i++) {
2482       ShenandoahHeapRegion* r = _ordered_regions->get(i);
2483       if (!r->is_humongous()) {
2484         assert (!in_collection_set(r), "collection set should be clear");
2485         _free_regions->add_region(r);
2486       }
2487     }
2488   }
2489   set_update_refs_in_progress(false);
2490 }
2491 
2492 class ShenandoahVerifyUpdateRefsClosure : public ExtendedOopClosure {
2493 private:
2494   template <class T>
2495   void do_oop_work(T* p) {
2496     T o = oopDesc::load_heap_oop(p);
2497     if (! oopDesc::is_null(o)) {
2498       oop obj = oopDesc::decode_heap_oop_not_null(o);
2499       guarantee(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)),
2500                 "must not be forwarded");
2501     }
2502   }
2503 public:
2504   void do_oop(oop* p) { do_oop_work(p); }
2505   void do_oop(narrowOop* p) { do_oop_work(p); }
2506 };
2507 
2508 void ShenandoahHeap::verify_update_refs() {
2509 
2510   ensure_parsability(false);
2511 
2512   ShenandoahVerifyUpdateRefsClosure cl;
2513 
2514   // Verify roots.
2515   {
2516     CodeBlobToOopClosure blobsCl(&cl, false);
2517     CLDToOopClosure cldCl(&cl);
2518     ShenandoahRootProcessor rp(this, 1);
2519     rp.process_all_roots(&cl, &cl, &cldCl, &blobsCl, 0);
2520   }
2521 
2522   // Verify heap.
2523   for (uint i = 0; i < num_regions(); i++) {
2524     ShenandoahHeapRegion* r = regions()->get(i);
2525     marked_object_oop_iterate(r, &cl);
2526   }
2527 }
2528 
2529 #ifdef ASSERT
2530 void ShenandoahHeap::assert_heaplock_owned_by_current_thread() {
2531   assert(_heap_lock == locked, "must be locked");
2532   assert(_heap_lock_owner == Thread::current(), "must be owned by current thread");
2533 }
2534 
2535 void ShenandoahHeap::assert_heaplock_or_safepoint() {
2536   Thread* thr = Thread::current();
2537   assert((_heap_lock == locked && _heap_lock_owner == thr) ||
2538          (SafepointSynchronize::is_at_safepoint() && thr->is_VM_thread()),
2539   "must own heap lock or by VM thread at safepoint");
2540 }
2541 
2542 #endif
2543 
2544 void ShenandoahHeap::start_deferred_recycling() {
2545   assert_heaplock_owned_by_current_thread();
2546   _recycled_region_count = 0;
2547 }
2548 
2549 void ShenandoahHeap::defer_recycle(ShenandoahHeapRegion* r) {
2550   assert_heaplock_owned_by_current_thread();
2551   _recycled_regions[_recycled_region_count++] = r->region_number();
2552 }
2553 
2554 void ShenandoahHeap::finish_deferred_recycle() {
2555   assert_heaplock_owned_by_current_thread();
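  // With the matrix enabled, recycle regions without touching the matrix
  // individually, then clear all affected matrix rows in one batched call.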
2556   if (UseShenandoahMatrix) {
2557     for (size_t i = 0; i < _recycled_region_count; i++) {
2558       regions()->get(_recycled_regions[i])->recycle_no_matrix();
2559     }
2560     connection_matrix()->clear_batched(_recycled_regions, _recycled_region_count);
2561   } else {
2562     for (size_t i = 0; i < _recycled_region_count; i++) {
2563       regions()->get(_recycled_regions[i])->recycle();
2564     }
2565   }
2566 }
2567 
2568 void ShenandoahHeap::deflate_idle_monitors_all_threads() {
2569   parallel_deflate_idle_monitors(workers());
2570 }