/*
 * Copyright (c) 2013, 2015, Red Hat, Inc. and/or its affiliates.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "memory/allocation.hpp"
#include "gc/g1/heapRegionBounds.inline.hpp"

#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/parallelCleaning.hpp"

#include "gc/shenandoah/brooksPointer.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.hpp"
#include "gc/shenandoah/shenandoahConcurrentThread.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahHumongous.hpp"
#include "gc/shenandoah/shenandoahMarkCompact.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.hpp"
#include "gc/shenandoah/vm_operations_shenandoah.hpp"

#include "runtime/vmThread.hpp"
#include "services/mallocTracker.hpp"

const char* ShenandoahHeap::name() const {
  return "Shenandoah";
}

void ShenandoahHeap::print_heap_locations(HeapWord* start, HeapWord* end) {
  HeapWord* cur = NULL;
  for (cur = start; cur < end; cur++) {
    tty->print_cr(PTR_FORMAT" : "PTR_FORMAT, p2i(cur), p2i(*((HeapWord**) cur)));
  }
}

class PrintHeapRegionsClosure : public ShenandoahHeapRegionClosure {
private:
  outputStream* _st;
public:
  PrintHeapRegionsClosure() : _st(tty) {}
  PrintHeapRegionsClosure(outputStream* st) : _st(st) {}

  bool doHeapRegion(ShenandoahHeapRegion* r) {
    r->print_on(_st);
    return false;
  }
};

class ShenandoahPretouchTask : public AbstractGangTask {
private:
  char* volatile _cur_addr;
  char* const _start_addr;
  char* const _end_addr;
  size_t const _page_size;
public:
  ShenandoahPretouchTask(char* start_address, char* end_address, size_t page_size) :
    AbstractGangTask("Shenandoah PreTouch",
                     Universe::is_fully_initialized() ? GCId::current_raw() :
                                                        // During VM initialization there is
                                                        // no GC cycle that this task can be
                                                        // associated with.
                                                        GCId::undefined()),
    _cur_addr(start_address),
    _start_addr(start_address),
    _end_addr(end_address),
    _page_size(page_size) {
  }

  virtual void work(uint worker_id) {
    size_t const actual_chunk_size = MAX2(PreTouchParallelChunkSize, _page_size);
    while (true) {
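      // Claim a chunk by atomically advancing the shared cursor; the value
      // before the add (returned sum minus the chunk size) is this worker's
      // chunk start. A claimed start at or beyond _end_addr means every
      // chunk has already been taken and this worker is done.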
      char* touch_addr = (char*)Atomic::add_ptr((intptr_t)actual_chunk_size, (volatile void*) &_cur_addr) - actual_chunk_size;
      if (touch_addr < _start_addr || touch_addr >= _end_addr) {
        break;
      }
      char* end_addr = touch_addr + MIN2(actual_chunk_size, pointer_delta(_end_addr, touch_addr, sizeof(char)));
      os::pretouch_memory(touch_addr, end_addr, _page_size);
    }
  }
};

void ShenandoahHeap::pretouch_storage(char* start, char* end, WorkGang* workers) {
  assert (ShenandoahAlwaysPreTouch, "Sanity");
  assert (!AlwaysPreTouch, "Should have been overridden");

  size_t size = (size_t)(end - start);
  size_t page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
  size_t num_chunks = MAX2((size_t)1, size / MAX2(PreTouchParallelChunkSize, page_size));
  uint num_workers = MIN2((uint)num_chunks, workers->active_workers());

  log_info(gc, heap)("Parallel pretouch with %u workers for " SIZE_FORMAT " work units pre-touching " SIZE_FORMAT " bytes.",
                      num_workers, num_chunks, size);

  ShenandoahPretouchTask cl(start, end, page_size);
  workers->run_task(&cl, num_workers);
}

jint ShenandoahHeap::initialize() {
  CollectedHeap::pre_initialize();

  size_t init_byte_size = collector_policy()->initial_heap_byte_size();
  size_t max_byte_size = collector_policy()->max_heap_byte_size();

  Universe::check_alignment(max_byte_size,
                            ShenandoahHeapRegion::RegionSizeBytes,
                            "shenandoah heap");
  Universe::check_alignment(init_byte_size,
                            ShenandoahHeapRegion::RegionSizeBytes,
                            "shenandoah heap");

  ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
                                                 Arguments::conservative_max_heap_alignment());
  initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*) (heap_rs.base() + heap_rs.size()));

  set_barrier_set(new ShenandoahBarrierSet(this));
  ReservedSpace pgc_rs = heap_rs.first_part(max_byte_size);
  _storage.initialize(pgc_rs, init_byte_size);
  if (ShenandoahAlwaysPreTouch) {
    pretouch_storage(_storage.low(), _storage.high(), _workers);
  }

  _num_regions = init_byte_size / ShenandoahHeapRegion::RegionSizeBytes;
  _max_regions = max_byte_size / ShenandoahHeapRegion::RegionSizeBytes;
  _initialSize = _num_regions * ShenandoahHeapRegion::RegionSizeBytes;
  size_t regionSizeWords = ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize;
  assert(init_byte_size == _initialSize, "tautology");
  _ordered_regions = new ShenandoahHeapRegionSet(_max_regions);
  _sorted_regions = new ShenandoahHeapRegionSet(_max_regions);
  _collection_set = new ShenandoahCollectionSet(_max_regions);
  _free_regions = new ShenandoahFreeSet(_max_regions);

  // Initialize fast collection set test structure.
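  // The *_base arrays below are indexed by region number. Each derived
  // pointer is biased by (heap base >> RegionSizeShift), so it can be
  // indexed directly with ((uintx) addr >> RegionSizeShift), saving a
  // subtraction on every lookup; G1 uses the same trick for its fast
  // collection set test.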
  _in_cset_fast_test_length = _max_regions;
  _in_cset_fast_test_base =
                   NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length, mtGC);
  _in_cset_fast_test = _in_cset_fast_test_base -
               ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::RegionSizeShift);

  _next_top_at_mark_starts_base =
                   NEW_C_HEAP_ARRAY(HeapWord*, _max_regions, mtGC);
  _next_top_at_mark_starts = _next_top_at_mark_starts_base -
               ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::RegionSizeShift);

  _complete_top_at_mark_starts_base =
                   NEW_C_HEAP_ARRAY(HeapWord*, _max_regions, mtGC);
  _complete_top_at_mark_starts = _complete_top_at_mark_starts_base -
               ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::RegionSizeShift);

  size_t i = 0;
  for (i = 0; i < _num_regions; i++) {
    _in_cset_fast_test_base[i] = false; // Not in cset
    HeapWord* bottom = (HeapWord*) pgc_rs.base() + regionSizeWords * i;
    _complete_top_at_mark_starts_base[i] = bottom;
    _next_top_at_mark_starts_base[i] = bottom;
  }

  {
    ShenandoahHeapLock lock(this);
    for (i = 0; i < _num_regions; i++) {
      ShenandoahHeapRegion* current = new ShenandoahHeapRegion();
      current->initialize_heap_region(this, (HeapWord*) pgc_rs.base() +
                                      regionSizeWords * i, regionSizeWords, i);
      _free_regions->add_region(current);
      _ordered_regions->add_region(current);
      _sorted_regions->add_region(current);
    }
  }
  assert(((size_t) _ordered_regions->active_regions()) == _num_regions, "");
  _first_region = _ordered_regions->get(0);
  _first_region_bottom = _first_region->bottom();
  assert((((size_t) _first_region_bottom) &
          (ShenandoahHeapRegion::RegionSizeBytes - 1)) == 0,
         "misaligned heap: "PTR_FORMAT, p2i(_first_region_bottom));

  _numAllocs = 0;

  if (log_is_enabled(Trace, gc, region)) {
    ResourceMark rm;
    outputStream* out = Log(gc, region)::trace_stream();
    log_trace(gc, region)("All Regions");
    _ordered_regions->print(out);
    log_trace(gc, region)("Free Regions");
    _free_regions->print(out);
  }

  // The call below uses the SATB queue machinery, which currently lives in
  // G1 but probably belongs in a shared location.
  JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
                                               SATB_Q_FL_lock,
                                               20 /* G1SATBProcessCompletedThreshold */,
                                               Shared_SATB_Q_lock);

  // Reserve space for the complete and next mark bitmaps.
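  // Two bitmaps are maintained: the "next" bitmap is built by the marking
  // cycle currently in flight, while the "complete" bitmap holds the results
  // of the last finished cycle; their roles swap when marking completes.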
  size_t bitmap_size = CMBitMap::compute_size(heap_rs.size());
  MemRegion heap_region = MemRegion((HeapWord*) heap_rs.base(), heap_rs.size() / HeapWordSize);

  size_t page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();

  ReservedSpace bitmap0(bitmap_size, page_size);
  os::commit_memory_or_exit(bitmap0.base(), bitmap0.size(), false, "couldn't allocate mark bitmap");
  MemTracker::record_virtual_memory_type(bitmap0.base(), mtGC);
  MemRegion bitmap_region0 = MemRegion((HeapWord*) bitmap0.base(), bitmap0.size() / HeapWordSize);
  _mark_bit_map0.initialize(heap_region, bitmap_region0);
  _complete_mark_bit_map = &_mark_bit_map0;

  ReservedSpace bitmap1(bitmap_size, page_size);
  os::commit_memory_or_exit(bitmap1.base(), bitmap1.size(), false, "couldn't allocate mark bitmap");
  MemTracker::record_virtual_memory_type(bitmap1.base(), mtGC);
  MemRegion bitmap_region1 = MemRegion((HeapWord*) bitmap1.base(), bitmap1.size() / HeapWordSize);
  _mark_bit_map1.initialize(heap_region, bitmap_region1);
  _next_mark_bit_map = &_mark_bit_map1;

  _monitoring_support = new ShenandoahMonitoringSupport(this);

  _concurrent_gc_thread = new ShenandoahConcurrentThread();

  ShenandoahMarkCompact::initialize();

  return JNI_OK;
}

ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
  CollectedHeap(),
  _shenandoah_policy(policy),
  _concurrent_mark_in_progress(0),
  _evacuation_in_progress(0),
  _full_gc_in_progress(false),
  _free_regions(NULL),
  _collection_set(NULL),
  _bytes_allocated_since_cm(0),
  _bytes_allocated_during_cm(0),
  _max_allocated_gc(0),
  _allocated_last_gc(0),
  _used_start_gc(0),
  _max_conc_workers((int) MAX2((uint) ConcGCThreads, 1U)),
  _max_parallel_workers((int) MAX2((uint) ParallelGCThreads, 1U)),
  _ref_processor(NULL),
  _in_cset_fast_test(NULL),
  _in_cset_fast_test_base(NULL),
  _next_top_at_mark_starts(NULL),
  _next_top_at_mark_starts_base(NULL),
  _complete_top_at_mark_starts(NULL),
  _complete_top_at_mark_starts_base(NULL),
  _mark_bit_map0(),
  _mark_bit_map1(),
  _cancelled_concgc(false),
  _need_update_refs(false),
  _need_reset_bitmaps(false),
  _heap_lock(0),
#ifdef ASSERT
  _heap_lock_owner(NULL),
#endif
  _gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer())
{
  log_info(gc, init)("Parallel GC threads: "UINT32_FORMAT, ParallelGCThreads);
  log_info(gc, init)("Concurrent GC threads: "UINT32_FORMAT, ConcGCThreads);
  log_info(gc, init)("Parallel reference processing enabled: %s", BOOL_TO_STR(ParallelRefProcEnabled));

  _scm = new ShenandoahConcurrentMark();
  _used = 0;

  // This is odd. They are concurrent GC threads, but they are also task threads.
  // The framework doesn't allow both.
  _workers = new WorkGang("Parallel GC Threads", ParallelGCThreads,
                            /* are_GC_task_threads */true,
                            /* are_ConcurrentGC_threads */false);
  _conc_workers = new WorkGang("Concurrent GC Threads", ConcGCThreads,
                            /* are_GC_task_threads */true,
                            /* are_ConcurrentGC_threads */false);
  if ((_workers == NULL) || (_conc_workers == NULL)) {
    vm_exit_during_initialization("Failed necessary allocation.");
  } else {
    _workers->initialize_workers();
    _conc_workers->initialize_workers();
  }
}

class ResetNextBitmapTask : public AbstractGangTask {
private:
  ShenandoahHeapRegionSet* _regions;

public:
  ResetNextBitmapTask(ShenandoahHeapRegionSet* regions) :
    AbstractGangTask("Parallel Reset Bitmap Task"),
    _regions(regions) {
    _regions->clear_current_index();
  }

  void work(uint worker_id) {
    ShenandoahHeapRegion* region = _regions->claim_next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    while (region != NULL) {
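      // Marks can only exist below the region's top-at-mark-start (TAMS):
      // objects above it were allocated after marking began and are treated
      // as implicitly live, so clearing the bitmap up to TAMS is enough.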
      HeapWord* bottom = region->bottom();
      HeapWord* top = heap->next_top_at_mark_start(region->bottom());
      if (top > bottom) {
        heap->next_mark_bit_map()->clear_range(MemRegion(bottom, top));
      }
      region = _regions->claim_next();
    }
  }
};

void ShenandoahHeap::reset_next_mark_bitmap(WorkGang* workers) {
  GCTraceTime(Info, gc, phases) time("Concurrent reset bitmaps", gc_timer(), GCCause::_no_gc);

  ResetNextBitmapTask task(_ordered_regions);
  workers->run_task(&task);
}

class ResetCompleteBitmapTask : public AbstractGangTask {
private:
  ShenandoahHeapRegionSet* _regions;

public:
  ResetCompleteBitmapTask(ShenandoahHeapRegionSet* regions) :
    AbstractGangTask("Parallel Reset Bitmap Task"),
    _regions(regions) {
    _regions->clear_current_index();
  }

  void work(uint worker_id) {
    ShenandoahHeapRegion* region = _regions->claim_next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    while (region != NULL) {
      HeapWord* bottom = region->bottom();
      HeapWord* top = heap->complete_top_at_mark_start(region->bottom());
      if (top > bottom) {
        heap->complete_mark_bit_map()->clear_range(MemRegion(bottom, top));
      }
      region = _regions->claim_next();
    }
  }
};

void ShenandoahHeap::reset_complete_mark_bitmap(WorkGang* workers) {
  GCTraceTime(Info, gc, phases) time("Concurrent reset bitmaps", gc_timer(), GCCause::_no_gc);

  ResetCompleteBitmapTask task(_ordered_regions);
  workers->run_task(&task);
}

bool ShenandoahHeap::is_next_bitmap_clear() {
  HeapWord* start = _ordered_regions->bottom();
  HeapWord* end = _ordered_regions->end();
  return _next_mark_bit_map->getNextMarkedWordAddress(start, end) == end;
}

bool ShenandoahHeap::is_complete_bitmap_clear_range(HeapWord* start, HeapWord* end) {
  return _complete_mark_bit_map->getNextMarkedWordAddress(start, end) == end;
}

void ShenandoahHeap::print_on(outputStream* st) const {
  st->print("Shenandoah Heap");
  st->print(" total = " SIZE_FORMAT " K, used " SIZE_FORMAT " K ", capacity() / K, used() / K);
  st->print("Region size = " SIZE_FORMAT "K ", ShenandoahHeapRegion::RegionSizeBytes / K);
  if (_concurrent_mark_in_progress) {
    st->print("marking ");
  }
  if (_evacuation_in_progress) {
    st->print("evacuating ");
  }
  if (cancelled_concgc()) {
    st->print("cancelled ");
  }
  st->print("\n");

  if (Verbose) {
    print_heap_regions(st);
  }
}

class InitGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    thread->gclab().initialize(true);
  }
};

void ShenandoahHeap::post_initialize() {
  if (UseTLAB) {
    InitGCLABClosure init_gclabs;
    for (JavaThread* thread = Threads::first(); thread != NULL; thread = thread->next()) {
      init_gclabs.do_thread(thread);
    }
    gc_threads_do(&init_gclabs);
  }

  _max_workers = MAX2(_max_parallel_workers, _max_conc_workers);
  _scm->initialize(_max_workers);

  ref_processing_init();
}

class CalculateUsedRegionClosure : public ShenandoahHeapRegionClosure {
  size_t sum;
public:
  CalculateUsedRegionClosure() {
    sum = 0;
  }

  bool doHeapRegion(ShenandoahHeapRegion* r) {
    sum = sum + r->used();
    return false;
  }

  size_t getResult() { return sum; }
};

size_t ShenandoahHeap::calculateUsed() {
  CalculateUsedRegionClosure cl;
  heap_region_iterate(&cl);
  return cl.getResult();
}

void ShenandoahHeap::verify_heap_size_consistency() {
  assert(calculateUsed() == used(),
         "heap used size must be consistent heap-used: "SIZE_FORMAT" regions-used: "SIZE_FORMAT, used(), calculateUsed());
}

size_t ShenandoahHeap::used() const {
  OrderAccess::acquire();
  return _used;
}

void ShenandoahHeap::increase_used(size_t bytes) {
  Atomic::add(bytes, &_used);
}

void ShenandoahHeap::set_used(size_t bytes) {
  _used = bytes;
  OrderAccess::release();
}

void ShenandoahHeap::decrease_used(size_t bytes) {
  assert(_used >= bytes, "never decrease used bytes by more than are currently used");
  Atomic::add(-bytes, &_used);
}

size_t ShenandoahHeap::capacity() const {
  return _num_regions * ShenandoahHeapRegion::RegionSizeBytes;
}

bool ShenandoahHeap::is_maximal_no_gc() const {
  Unimplemented();
  return true;
}

size_t ShenandoahHeap::max_capacity() const {
  return _max_regions * ShenandoahHeapRegion::RegionSizeBytes;
}

size_t ShenandoahHeap::min_capacity() const {
  return _initialSize;
}

VirtualSpace* ShenandoahHeap::storage() const {
  return (VirtualSpace*) &_storage;
}

bool ShenandoahHeap::is_in(const void* p) const {
  HeapWord* first_region_bottom = _first_region->bottom();
  HeapWord* last_region_end = first_region_bottom + (ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize) * _num_regions;
  return p >= first_region_bottom && p < last_region_end;
}

bool ShenandoahHeap::is_scavengable(const void* p) {
  return true;
}

HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
  // Retain tlab and allocate object in shared space if
  // the amount free in the tlab is too large to discard.
  if (thread->gclab().free() > thread->gclab().refill_waste_limit()) {
    thread->gclab().record_slow_allocation(size);
    return NULL;
  }

  // Discard gclab and allocate a new one.
  // To minimize fragmentation, the last GCLAB may be smaller than the rest.
  size_t new_gclab_size = thread->gclab().compute_size(size);

  thread->gclab().clear_before_allocation();

  if (new_gclab_size == 0) {
    return NULL;
  }

  // Allocate a new GCLAB...
  HeapWord* obj = allocate_new_gclab(new_gclab_size);
  if (obj == NULL) {
    return NULL;
  }

  if (ZeroTLAB) {
    // ..and clear it.
    Copy::zero_to_words(obj, new_gclab_size);
  } else {
    // ...and zap just allocated object.
#ifdef ASSERT
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(obj + hdr_size, new_gclab_size - hdr_size, badHeapWordVal);
#endif // ASSERT
  }
  thread->gclab().fill(obj, obj + size, new_gclab_size);
  return obj;
}

HeapWord* ShenandoahHeap::allocate_new_tlab(size_t word_size) {
  return allocate_new_tlab(word_size, false);
}

HeapWord* ShenandoahHeap::allocate_new_gclab(size_t word_size) {
  return allocate_new_tlab(word_size, true);
}

HeapWord* ShenandoahHeap::allocate_new_tlab(size_t word_size, bool evacuating) {
  HeapWord* result = allocate_memory(word_size, evacuating);

  if (result != NULL) {
    assert(! in_collection_set(result), "Never allocate in dirty region");
    _bytes_allocated_since_cm += word_size * HeapWordSize;

    log_develop_trace(gc, tlab)("allocating new tlab of size "SIZE_FORMAT" at addr "PTR_FORMAT, word_size, p2i(result));
  }
  return result;
}

ShenandoahHeap* ShenandoahHeap::heap() {
  CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to ShenandoahHeap::heap()");
  assert(heap->kind() == CollectedHeap::ShenandoahHeap, "not a shenandoah heap");
  return (ShenandoahHeap*) heap;
}

ShenandoahHeap* ShenandoahHeap::heap_no_check() {
  CollectedHeap* heap = Universe::heap();
  return (ShenandoahHeap*) heap;
}

HeapWord* ShenandoahHeap::allocate_memory_work(size_t word_size) {
  ShenandoahHeapLock heap_lock(this);

  HeapWord* result = allocate_memory_under_lock(word_size);
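
  // If the allocation failed, compute how many regions the request spans
  // (rounded up) and keep growing the heap by that many regions until the
  // allocation succeeds or the maximum region count would be exceeded.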
  int grow_by = (word_size * HeapWordSize + ShenandoahHeapRegion::RegionSizeBytes - 1) / ShenandoahHeapRegion::RegionSizeBytes;

  while (result == NULL && _num_regions + grow_by <= _max_regions) {
    grow_heap_by(grow_by);
    result = allocate_memory_under_lock(word_size);
  }

  return result;
}

HeapWord* ShenandoahHeap::allocate_memory(size_t word_size, bool evacuating) {
  HeapWord* result = allocate_memory_work(word_size);

  if (!evacuating) {
    // Allocation failed, try full-GC, then retry allocation.
    //
    // It might happen that one of the threads requesting allocation would unblock
    // way later after full-GC happened, only to fail the second allocation, because
    // other threads have already depleted the free storage. In this case, a better
    // strategy would be to try full-GC again.
    //
    // Lacking the way to detect progress from "collect" call, we are left with blindly
    // retrying for some bounded number of times.
    // TODO: Poll if Full GC made enough progress to warrant retry.
    int tries = 0;
    while ((result == NULL) && (tries++ < ShenandoahFullGCTries)) {
      log_debug(gc)("[" PTR_FORMAT "] Failed to allocate " SIZE_FORMAT " bytes, doing full GC, try %d",
                    p2i(Thread::current()), word_size * HeapWordSize, tries);
      collect(GCCause::_allocation_failure);
      result = allocate_memory_work(word_size);
    }
  }

  // Only update monitoring counters when not calling from a write-barrier.
  // Otherwise we might attempt to grab the Service_lock, which we must
  // not do when coming from a write-barrier (because the thread might
  // already hold the Compile_lock).
  if (! evacuating) {
    monitoring_support()->update_counters();
  }

  log_develop_trace(gc, alloc)("allocate memory chunk of size "SIZE_FORMAT" at addr "PTR_FORMAT " by thread %d ",
                               word_size, p2i(result), Thread::current()->osthread()->thread_id());

  return result;
}

bool ShenandoahHeap::call_from_write_barrier(bool evacuating) {
  return evacuating && Thread::current()->is_Java_thread();
}

HeapWord* ShenandoahHeap::allocate_memory_under_lock(size_t word_size) {
  assert_heaplock_owned_by_current_thread();

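  // Requests larger than a single region cannot be satisfied from the
  // regular free list; they take the humongous path, which claims a run
  // of contiguous free regions instead.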
  if (word_size * HeapWordSize > ShenandoahHeapRegion::RegionSizeBytes) {
    return allocate_large_memory(word_size);
  }

  // Not enough memory in free region set.
  // Coming out of full GC, it is possible that there is no
  // free region available, so current_index may not be valid.
  if (word_size * HeapWordSize > _free_regions->capacity()) return NULL;

  ShenandoahHeapRegion* my_current_region = _free_regions->current_no_humongous();

  if (my_current_region == NULL) {
    return NULL; // No more room to make a new region. OOM.
  }

#ifdef ASSERT
  if (in_collection_set(my_current_region)) {
    print_heap_regions();
  }
#endif
  assert(! in_collection_set(my_current_region), "never get targeted regions in free-lists");
  assert(! my_current_region->is_humongous(), "never attempt to allocate from humongous object regions");

  HeapWord* result = my_current_region->par_allocate(word_size);

  while (result == NULL) {
    // 2nd attempt. Try next region.
    _free_regions->increase_used(my_current_region->free());
    ShenandoahHeapRegion* next_region = _free_regions->next_no_humongous();
    assert(next_region != my_current_region, "must not get current again");
    my_current_region = next_region;

    if (my_current_region == NULL) {
      return NULL; // No more room to make a new region. OOM.
    }
    assert(! in_collection_set(my_current_region), "never get targeted regions in free-lists");
    assert(! my_current_region->is_humongous(), "never attempt to allocate from humongous object regions");
    result = my_current_region->par_allocate(word_size);
  }

  my_current_region->increase_live_data(word_size * HeapWordSize);
  increase_used(word_size * HeapWordSize);
  _free_regions->increase_used(word_size * HeapWordSize);
  return result;
}

HeapWord* ShenandoahHeap::allocate_large_memory(size_t words) {
  assert_heaplock_owned_by_current_thread();

  uint required_regions = ShenandoahHumongous::required_regions(words * HeapWordSize);
  if (required_regions > _max_regions) return NULL;

  ShenandoahHeapRegion* r = _free_regions->allocate_contiguous(required_regions);

  HeapWord* result = NULL;

  if (r != NULL) {
    result = r->bottom();

    log_debug(gc, humongous)("allocating humongous object of size: "SIZE_FORMAT" KB at location "PTR_FORMAT" in start region "SIZE_FORMAT,
                             (words * HeapWordSize) / K, p2i(result), r->region_number());
  } else {
    log_debug(gc, humongous)("allocating humongous object of size: "SIZE_FORMAT" KB at location "PTR_FORMAT" failed",
                             (words * HeapWordSize) / K, p2i(result));
  }

  return result;
}

HeapWord* ShenandoahHeap::mem_allocate(size_t size,
                                       bool* gc_overhead_limit_was_exceeded) {
#ifdef ASSERT
  if (ShenandoahVerify && _numAllocs > 1000000) {
    _numAllocs = 0;
  }
  _numAllocs++;
#endif
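  // Every object is preceded by a Brooks forwarding pointer: allocate one
  // extra word, treat the address just past it as the object start, and
  // initialize the forwarding word to point at the object itself.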
  HeapWord* filler = allocate_memory(BrooksPointer::word_size() + size, false);
  if (filler != NULL) {
    HeapWord* result = filler + BrooksPointer::word_size();
    BrooksPointer::initialize(oop(result));
    _bytes_allocated_since_cm += size * HeapWordSize;

    assert(! in_collection_set(result), "never allocate in targeted region");
    return result;
  } else {
    /*
    tty->print_cr("Out of memory. Requested number of words: "SIZE_FORMAT" used heap: "INT64_FORMAT", bytes allocated since last CM: "INT64_FORMAT,
                  size, used(), _bytes_allocated_since_cm);
    {
      print_heap_regions();
      tty->print("Printing "SIZE_FORMAT" free regions:\n", _free_regions->count());
      _free_regions->print();
    }
    */
    return NULL;
  }
}

class ParallelEvacuateRegionObjectClosure : public ObjectClosure {
private:
  ShenandoahHeap* _heap;
  Thread* _thread;
public:
  ParallelEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
    _heap(heap), _thread(Thread::current()) {
  }

  void do_object(oop p) {
    log_develop_trace(gc, compaction)("Calling ParallelEvacuateRegionObjectClosure on "PTR_FORMAT" of size %d", p2i((HeapWord*) p), p->size());

    assert(_heap->is_marked_complete(p), "expect only marked objects");
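    // Copy only objects whose Brooks pointer still resolves to themselves;
    // an object that already points to a to-space copy has been evacuated
    // by another worker and must not be copied again.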
    if (oopDesc::unsafe_equals(p, ShenandoahBarrierSet::resolve_oop_static_not_null(p))) {
      _heap->evacuate_object(p, _thread);
    }
  }
};

#ifdef ASSERT
class VerifyEvacuatedObjectClosure : public ObjectClosure {
public:
  void do_object(oop p) {
    if (ShenandoahHeap::heap()->is_marked_complete(p)) {
      oop p_prime = oopDesc::bs()->read_barrier(p);
      assert(! oopDesc::unsafe_equals(p, p_prime), "Should point to evacuated copy");
      if (p->klass() != p_prime->klass()) {
        tty->print_cr("copy has different class than original:");
        p->klass()->print_on(tty);
        p_prime->klass()->print_on(tty);
      }
      assert(p->klass() == p_prime->klass(), "Should have the same class p: "PTR_FORMAT", p_prime: "PTR_FORMAT, p2i(p), p2i(p_prime));
      //      assert(p->mark() == p_prime->mark(), "Should have the same mark");
      assert(p->size() == p_prime->size(), "Should be the same size");
      assert(oopDesc::unsafe_equals(p_prime, oopDesc::bs()->read_barrier(p_prime)), "One forward once");
    }
  }
};

void ShenandoahHeap::verify_evacuated_region(ShenandoahHeapRegion* from_region) {
  VerifyEvacuatedObjectClosure verify_evacuation;
  marked_object_iterate(from_region, &verify_evacuation);
}
#endif

void ShenandoahHeap::parallel_evacuate_region(ShenandoahHeapRegion* from_region) {
  assert(from_region->get_live_data() > 0, "all-garbage regions are reclaimed earlier");

  ParallelEvacuateRegionObjectClosure evacuate_region(this);

  marked_object_iterate(from_region, &evacuate_region);

#ifdef ASSERT
  if (ShenandoahVerify && ! cancelled_concgc()) {
    verify_evacuated_region(from_region);
  }
#endif
}

class ParallelEvacuationTask : public AbstractGangTask {
private:
  ShenandoahHeap* _sh;
  ShenandoahCollectionSet* _cs;

public:
  ParallelEvacuationTask(ShenandoahHeap* sh,
                         ShenandoahCollectionSet* cs) :
    AbstractGangTask("Parallel Evacuation Task"),
    _sh(sh),
    _cs(cs) {}

  void work(uint worker_id) {
    ShenandoahHeapRegion* from_hr = _cs->claim_next();

    while (from_hr != NULL) {
      log_develop_trace(gc, region)("Thread "UINT32_FORMAT" claimed Heap Region "SIZE_FORMAT,
                                    worker_id,
                                    from_hr->region_number());

      assert(from_hr->get_live_data() > 0, "all-garbage regions are reclaimed early");
      _sh->parallel_evacuate_region(from_hr);

      if (_sh->cancelled_concgc()) {
        log_develop_trace(gc, region)("Cancelled concgc while evacuating region " SIZE_FORMAT, from_hr->region_number());
        break;
      }
      from_hr = _cs->claim_next();
    }
  }
};

class RecycleDirtyRegionsClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* _heap;
  size_t _bytes_reclaimed;
public:
  RecycleDirtyRegionsClosure() : _heap(ShenandoahHeap::heap()), _bytes_reclaimed(0) {}

  bool doHeapRegion(ShenandoahHeapRegion* r) {
    assert (! _heap->cancelled_concgc(), "no recycling after cancelled marking");

    if (_heap->in_collection_set(r)) {
      log_develop_trace(gc, region)("Recycling region " SIZE_FORMAT ":", r->region_number());
      _heap->decrease_used(r->used());
      _bytes_reclaimed += r->used();
      r->recycle();
    }

    return false;
  }
  size_t bytes_reclaimed() { return _bytes_reclaimed; }
  void clear_bytes_reclaimed() { _bytes_reclaimed = 0; }
};

void ShenandoahHeap::recycle_dirty_regions() {
  RecycleDirtyRegionsClosure cl;
  cl.clear_bytes_reclaimed();

  heap_region_iterate(&cl);

  _shenandoah_policy->record_bytes_reclaimed(cl.bytes_reclaimed());
  if (! cancelled_concgc()) {
    clear_cset_fast_test();
  }
}

ShenandoahFreeSet* ShenandoahHeap::free_regions() {
  return _free_regions;
}

void ShenandoahHeap::print_heap_regions(outputStream* st) const {
  _ordered_regions->print(st);
}

class PrintAllRefsOopClosure: public ExtendedOopClosure {
private:
  int _index;
  const char* _prefix;

public:
  PrintAllRefsOopClosure(const char* prefix) : _index(0), _prefix(prefix) {}

private:
  template <class T>
  inline void do_oop_work(T* p) {
    oop o = oopDesc::load_decode_heap_oop(p);
    if (o != NULL) {
      if (ShenandoahHeap::heap()->is_in(o) && o->is_oop()) {
        tty->print_cr("%s "INT32_FORMAT" ("PTR_FORMAT")-> "PTR_FORMAT" (marked: %s) (%s "PTR_FORMAT")",
                      _prefix, _index,
                      p2i(p), p2i(o),
                      BOOL_TO_STR(ShenandoahHeap::heap()->is_marked_complete(o)),
                      o->klass()->internal_name(), p2i(o->klass()));
      } else {
        tty->print_cr("%s "INT32_FORMAT" ("PTR_FORMAT") dirty -> "PTR_FORMAT" (not in heap, possibly corrupted or dirty)",
                      _prefix, _index,
                      p2i(p), p2i(o));
      }
    } else {
      tty->print_cr("%s "INT32_FORMAT" ("PTR_FORMAT") -> "PTR_FORMAT, _prefix, _index, p2i(p), p2i((HeapWord*) o));
    }
    _index++;
  }

public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }

  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

class PrintAllRefsObjectClosure : public ObjectClosure {
  const char* _prefix;

public:
  PrintAllRefsObjectClosure(const char* prefix) : _prefix(prefix) {}

  void do_object(oop p) {
    if (ShenandoahHeap::heap()->is_in(p)) {
      tty->print_cr("%s object "PTR_FORMAT" (marked: %s) (%s "PTR_FORMAT") refers to:",
                    _prefix, p2i(p),
                    BOOL_TO_STR(ShenandoahHeap::heap()->is_marked_complete(p)),
                    p->klass()->internal_name(), p2i(p->klass()));
      PrintAllRefsOopClosure cl(_prefix);
      p->oop_iterate(&cl);
    }
  }
};

void ShenandoahHeap::print_all_refs(const char* prefix) {
  tty->print_cr("printing all references in the heap");
  tty->print_cr("root references:");

  ensure_parsability(false);

  PrintAllRefsOopClosure cl(prefix);
  roots_iterate(&cl);

  tty->print_cr("heap references:");
  PrintAllRefsObjectClosure cl2(prefix);
  object_iterate(&cl2);
}

class VerifyAfterMarkingOopClosure: public ExtendedOopClosure {
private:
  ShenandoahHeap*  _heap;

public:
  VerifyAfterMarkingOopClosure() :
    _heap(ShenandoahHeap::heap()) { }

private:
  template <class T>
  inline void do_oop_work(T* p) {
    oop o = oopDesc::load_decode_heap_oop(p);
    if (o != NULL) {
      if (! _heap->is_marked_complete(o)) {
        _heap->print_heap_regions();
        _heap->print_all_refs("post-mark");
        tty->print_cr("oop not marked, although referrer is marked: "PTR_FORMAT": in_heap: %s, is_marked: %s",
                      p2i((HeapWord*) o), BOOL_TO_STR(_heap->is_in(o)), BOOL_TO_STR(_heap->is_marked_complete(o)));
        _heap->print_heap_locations((HeapWord*) o, (HeapWord*) o + o->size());

        tty->print_cr("oop class: %s", o->klass()->internal_name());
        if (_heap->is_in(p)) {
          oop referrer = oop(_heap->heap_region_containing(p)->block_start_const(p));
          tty->print_cr("Referrer starts at addr "PTR_FORMAT, p2i((HeapWord*) referrer));
          referrer->print();
          _heap->print_heap_locations((HeapWord*) referrer, (HeapWord*) referrer + referrer->size());
        }
        tty->print_cr("heap region containing object:");
        _heap->heap_region_containing(o)->print();
        tty->print_cr("heap region containing referrer:");
        _heap->heap_region_containing(p)->print();
        tty->print_cr("heap region containing forwardee:");
        _heap->heap_region_containing(oopDesc::bs()->read_barrier(o))->print();
      }
      assert(o->is_oop(), "oop must be an oop");
      assert(Metaspace::contains(o->klass()), "klass pointer must go to metaspace");
      if (! oopDesc::unsafe_equals(o, oopDesc::bs()->read_barrier(o))) {
        tty->print_cr("oop has forwardee: p: "PTR_FORMAT" (%s), o = "PTR_FORMAT" (%s), new-o: "PTR_FORMAT" (%s)",
                      p2i(p),
                      BOOL_TO_STR(_heap->in_collection_set(p)),
                      p2i(o),
                      BOOL_TO_STR(_heap->in_collection_set(o)),
                      p2i((HeapWord*) oopDesc::bs()->read_barrier(o)),
                      BOOL_TO_STR(_heap->in_collection_set(oopDesc::bs()->read_barrier(o))));
        tty->print_cr("oop class: %s", o->klass()->internal_name());
      }
      assert(oopDesc::unsafe_equals(o, oopDesc::bs()->read_barrier(o)), "oops must not be forwarded");
      assert(! _heap->in_collection_set(o), "references must not point to dirty heap regions");
      assert(_heap->is_marked_complete(o), "live oops must be marked current");
    }
  }

public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }

  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

void ShenandoahHeap::verify_heap_after_marking() {
  verify_heap_size_consistency();

  log_trace(gc)("verifying heap after marking");

  VerifyAfterMarkingOopClosure cl;
  roots_iterate(&cl);
  ObjectToOopClosure objs(&cl);
  object_iterate(&objs);
}

void ShenandoahHeap::reclaim_humongous_region_at(ShenandoahHeapRegion* r) {
  assert(r->is_humongous_start(), "reclaim regions starting with the first one");

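  // The humongous object starts one Brooks-pointer word past the first
  // region's bottom; its size (plus that word) determines how many
  // consecutive regions the object claimed and must now be recycled.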
  oop humongous_obj = oop(r->bottom() + BrooksPointer::word_size());
  size_t size = humongous_obj->size() + BrooksPointer::word_size();
  uint required_regions = ShenandoahHumongous::required_regions(size * HeapWordSize);
  uint index = r->region_number();

  assert(r->get_live_data() == 0, "liveness must be zero");

  for (size_t i = 0; i < required_regions; i++) {
    ShenandoahHeapRegion* region = _ordered_regions->get(index++);

    assert((region->is_humongous_start() || region->is_humongous_continuation()),
           "expect correct humongous start or continuation");

    if (log_is_enabled(Debug, gc, humongous)) {
      log_debug(gc, humongous)("reclaiming "UINT32_FORMAT" humongous regions for object of size: "SIZE_FORMAT" words", required_regions, size);
      ResourceMark rm;
      outputStream* out = Log(gc, humongous)::debug_stream();
      region->print_on(out);
    }

    region->recycle();
    ShenandoahHeap::heap()->decrease_used(ShenandoahHeapRegion::RegionSizeBytes);
  }
}

class ShenandoahReclaimHumongousRegionsClosure : public ShenandoahHeapRegionClosure {
  bool doHeapRegion(ShenandoahHeapRegion* r) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();

    if (r->is_humongous_start()) {
      oop humongous_obj = oop(r->bottom() + BrooksPointer::word_size());
      if (! heap->is_marked_complete(humongous_obj)) {
        heap->reclaim_humongous_region_at(r);
      }
    }
    return false;
  }
};

#ifdef ASSERT
class CheckCollectionSetClosure: public ShenandoahHeapRegionClosure {
  bool doHeapRegion(ShenandoahHeapRegion* r) {
    assert(! ShenandoahHeap::heap()->in_collection_set(r), "Should have been cleared by now");
    return false;
  }
};
#endif

void ShenandoahHeap::prepare_for_concurrent_evacuation() {
  assert(_ordered_regions->get(0)->region_number() == 0, "FIXME CHF. FIXME CHF!");

  log_develop_trace(gc)("Thread %d started prepare_for_concurrent_evacuation", Thread::current()->osthread()->thread_id());

  if (!cancelled_concgc()) {
    recycle_dirty_regions();

    ensure_parsability(true);

#ifdef ASSERT
    if (ShenandoahVerify) {
      verify_heap_after_marking();
    }
#endif

    // NOTE: This needs to be done during a stop the world pause, because
    // putting regions into the collection set concurrently with Java threads
    // will create a race. In particular, acmp could fail because when we
    // resolve the first operand, the containing region might not yet be in
    // the collection set, and thus return the original oop. When the 2nd
    // operand gets resolved, the region could be in the collection set
    // and the oop gets evacuated. If both operands have originally been
    // the same, we get false negatives.

    {
      ShenandoahHeapLock lock(this);
      _collection_set->clear();
      _free_regions->clear();

      ShenandoahReclaimHumongousRegionsClosure reclaim;
      heap_region_iterate(&reclaim);

#ifdef ASSERT
      CheckCollectionSetClosure ccsc;
      _ordered_regions->heap_region_iterate(&ccsc);
#endif

      if (UseShenandoahMatrix) {
        int num = num_regions();
        int* connections = NEW_C_HEAP_ARRAY(int, num * num, mtGC);
        calculate_matrix(connections);
        print_matrix(connections);
        _shenandoah_policy->choose_collection_set(_collection_set, connections);
        FREE_C_HEAP_ARRAY(int, connections);
      } else {
        _shenandoah_policy->choose_collection_set(_collection_set);
      }

      _shenandoah_policy->choose_free_set(_free_regions);
    }

    if (UseShenandoahMatrix) {
      _collection_set->print();
    }

    _bytes_allocated_since_cm = 0;

    Universe::update_heap_info_at_gc();
  }
}

class RetireTLABClosure : public ThreadClosure {
private:
  bool _retire;

public:
  RetireTLABClosure(bool retire) : _retire(retire) {
  }

  void do_thread(Thread* thread) {
    thread->gclab().make_parsable(_retire);
  }
};

void ShenandoahHeap::ensure_parsability(bool retire_tlabs) {
  if (UseTLAB) {
    CollectedHeap::ensure_parsability(retire_tlabs);

    RetireTLABClosure cl(retire_tlabs);
    for (JavaThread* thread = Threads::first(); thread != NULL; thread = thread->next()) {
      cl.do_thread(thread);
    }
    gc_threads_do(&cl);
  }
}

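// Used during the root evacuation pause: for each root slot that points
// into the collection set, evacuate the referent (unless it has already
// been copied) and update the slot to refer to the to-space copy.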
class ShenandoahEvacuateUpdateRootsClosure: public ExtendedOopClosure {
private:
  ShenandoahHeap* _heap;
  Thread* _thread;
public:
  ShenandoahEvacuateUpdateRootsClosure() :
    _heap(ShenandoahHeap::heap()), _thread(Thread::current()) {
  }

private:
  template <class T>
  void do_oop_work(T* p) {
    assert(_heap->is_evacuation_in_progress(), "Only do this when evacuation is in progress");

    T o = oopDesc::load_heap_oop(p);
    if (! oopDesc::is_null(o)) {
      oop obj = oopDesc::decode_heap_oop_not_null(o);
      if (_heap->in_collection_set(obj)) {
        assert(_heap->is_marked_complete(obj), "only evacuate marked objects %d %d",
               _heap->is_marked_complete(obj), _heap->is_marked_complete(ShenandoahBarrierSet::resolve_oop_static_not_null(obj)));
        oop resolved = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
        if (oopDesc::unsafe_equals(resolved, obj)) {
          resolved = _heap->evacuate_object(obj, _thread);
        }
        oopDesc::encode_store_heap_oop(p, resolved);
      }
    }
#ifdef ASSERT
    else {
      // tty->print_cr("not updating root at: "PTR_FORMAT" with object: "PTR_FORMAT", is_in_heap: %s, is_in_cset: %s, is_marked: %s",
      //               p2i(p),
      //               p2i((HeapWord*) obj),
      //               BOOL_TO_STR(_heap->is_in(obj)),
      //               BOOL_TO_STR(_heap->in_cset_fast_test(obj)),
      //               BOOL_TO_STR(_heap->is_marked_complete(obj)));
    }
#endif
  }

public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }
  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

class ShenandoahEvacuateUpdateRootsTask : public AbstractGangTask {
  ShenandoahRootEvacuator* _rp;
public:
  ShenandoahEvacuateUpdateRootsTask(ShenandoahRootEvacuator* rp) :
    AbstractGangTask("Shenandoah evacuate and update roots"),
    _rp(rp)
  {
    // Nothing else to do.
  }

  void work(uint worker_id) {
    ShenandoahEvacuateUpdateRootsClosure cl;
    MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);

    _rp->process_evacuate_roots(&cl, &blobsCl, worker_id);
  }
};

void ShenandoahHeap::evacuate_and_update_roots() {
  COMPILER2_PRESENT(DerivedPointerTable::clear());

#ifdef ASSERT
  if (ShenandoahVerifyReadsToFromSpace) {
    set_from_region_protection(false);
  }
#endif

  assert(SafepointSynchronize::is_at_safepoint(), "Only iterate roots while world is stopped");
  ClassLoaderDataGraph::clear_claimed_marks();

  {
    ShenandoahRootEvacuator rp(this, _max_parallel_workers, ShenandoahCollectorPolicy::evac_thread_roots);
    ShenandoahEvacuateUpdateRootsTask roots_task(&rp);
    workers()->run_task(&roots_task);
  }

#ifdef ASSERT
  if (ShenandoahVerifyReadsToFromSpace) {
    set_from_region_protection(true);
  }
#endif

  COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
}

void ShenandoahHeap::do_evacuation() {
  parallel_evacuate();

  if (ShenandoahVerify && ! cancelled_concgc()) {
    VM_ShenandoahVerifyHeapAfterEvacuation verify_after_evacuation;
    if (Thread::current()->is_VM_thread()) {
      verify_after_evacuation.doit();
    } else {
      VMThread::execute(&verify_after_evacuation);
    }
  }
}

void ShenandoahHeap::parallel_evacuate() {
  log_develop_trace(gc)("starting parallel_evacuate");

  _shenandoah_policy->record_phase_start(ShenandoahCollectorPolicy::conc_evac);

  if (log_is_enabled(Trace, gc, region)) {
    ResourceMark rm;
    outputStream* out = Log(gc, region)::trace_stream();
    out->print("Printing all available regions");
    print_heap_regions(out);
  }

  if (log_is_enabled(Trace, gc, cset)) {
    ResourceMark rm;
    outputStream* out = Log(gc, cset)::trace_stream();
    out->print("Printing collection set which contains "SIZE_FORMAT" regions:\n", _collection_set->count());
    _collection_set->print(out);

    out->print("Printing free set which contains "SIZE_FORMAT" regions:\n", _free_regions->count());
    _free_regions->print(out);
  }

  ParallelEvacuationTask evacuationTask(this, _collection_set);

  conc_workers()->run_task(&evacuationTask);

  if (log_is_enabled(Trace, gc, cset)) {
    ResourceMark rm;
    outputStream* out = Log(gc, cset)::trace_stream();
    out->print("Printing postgc collection set which contains "SIZE_FORMAT" regions:\n",
               _collection_set->count());
    _collection_set->print(out);

    out->print("Printing postgc free regions which contain "SIZE_FORMAT" free regions:\n",
               _free_regions->count());
    _free_regions->print(out);
  }

  if (log_is_enabled(Trace, gc, region)) {
    ResourceMark rm;
    outputStream* out = Log(gc, region)::trace_stream();
    out->print_cr("all regions after evacuation:");
    print_heap_regions(out);
  }

  _shenandoah_policy->record_phase_end(ShenandoahCollectorPolicy::conc_evac);
}

class VerifyEvacuationClosure: public ExtendedOopClosure {
private:
  ShenandoahHeap*  _heap;
  ShenandoahHeapRegion* _from_region;

public:
  VerifyEvacuationClosure(ShenandoahHeapRegion* from_region) :
    _heap(ShenandoahHeap::heap()), _from_region(from_region) { }

private:
  template <class T>
  inline void do_oop_work(T* p) {
    oop heap_oop = oopDesc::load_decode_heap_oop(p);
    if (! oopDesc::is_null(heap_oop)) {
      guarantee(! _from_region->is_in(heap_oop), "no references to from-region allowed after evacuation: "PTR_FORMAT, p2i((HeapWord*) heap_oop));
    }
  }

public:
  void do_oop(oop* p)       {
    do_oop_work(p);
  }

  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

void ShenandoahHeap::roots_iterate(OopClosure* cl) {
  assert(SafepointSynchronize::is_at_safepoint(), "Only iterate roots while world is stopped");

  CodeBlobToOopClosure blobsCl(cl, false);
  CLDToOopClosure cldCl(cl);

  ClassLoaderDataGraph::clear_claimed_marks();

  ShenandoahRootProcessor rp(this, 1);
  rp.process_all_roots(cl, NULL, &cldCl, &blobsCl, 0);
}

void ShenandoahHeap::verify_evacuation(ShenandoahHeapRegion* from_region) {
  VerifyEvacuationClosure rootsCl(from_region);
  roots_iterate(&rootsCl);
}

bool ShenandoahHeap::supports_tlab_allocation() const {
  return true;
}

size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread* thread) const {
  size_t idx = _free_regions->current_index();
  ShenandoahHeapRegion* current = _free_regions->get(idx);
  if (current == NULL) {
    return 0;
  } else if (current->free() > MinTLABSize) {
    // Current region has enough space left, can use it.
    return current->free();
  } else {
    // No more space in current region, we will take next free region
    // on the next TLAB allocation.
    return ShenandoahHeapRegion::RegionSizeBytes;
  }
}

size_t ShenandoahHeap::max_tlab_size() const {
  return ShenandoahHeapRegion::RegionSizeBytes;
}

class ResizeGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    thread->gclab().resize();
  }
};

void ShenandoahHeap::resize_all_tlabs() {
  CollectedHeap::resize_all_tlabs();

  ResizeGCLABClosure cl;
  for (JavaThread* thread = Threads::first(); thread != NULL; thread = thread->next()) {
    cl.do_thread(thread);
  }
  gc_threads_do(&cl);
}

class AccumulateStatisticsGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    thread->gclab().accumulate_statistics();
    thread->gclab().initialize_statistics();
  }
};

void ShenandoahHeap::accumulate_statistics_all_gclabs() {
  AccumulateStatisticsGCLABClosure cl;
  for (JavaThread* thread = Threads::first(); thread != NULL; thread = thread->next()) {
    cl.do_thread(thread);
  }
  gc_threads_do(&cl);
}

bool ShenandoahHeap::can_elide_tlab_store_barriers() const {
  return true;
}

oop ShenandoahHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) {
  // Overridden to do nothing.
  return new_obj;
}

bool ShenandoahHeap::can_elide_initializing_store_barrier(oop new_obj) {
  return true;
}

bool ShenandoahHeap::card_mark_must_follow_store() const {
  return false;
}

void ShenandoahHeap::collect(GCCause::Cause cause) {
  assert(cause != GCCause::_gc_locker, "no JNI critical callback");
  if (GCCause::is_user_requested_gc(cause)) {
    if (! DisableExplicitGC) {
      _concurrent_gc_thread->do_full_gc(cause);
    }
  } else if (cause == GCCause::_allocation_failure) {
    collector_policy()->set_should_clear_all_soft_refs(true);
    _concurrent_gc_thread->do_full_gc(cause);
  }
}

void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {
  //assert(false, "Shouldn't need to do full collections");
}

AdaptiveSizePolicy* ShenandoahHeap::size_policy() {
  Unimplemented();
  return NULL;
}

CollectorPolicy* ShenandoahHeap::collector_policy() const {
  return _shenandoah_policy;
}

HeapWord* ShenandoahHeap::block_start(const void* addr) const {
  Space* sp = heap_region_containing(addr);
  if (sp != NULL) {
    return sp->block_start(addr);
  }
  return NULL;
}

size_t ShenandoahHeap::block_size(const HeapWord* addr) const {
  Space* sp = heap_region_containing(addr);
  assert(sp != NULL, "block_size of address outside of heap");
  return sp->block_size(addr);
}

bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
  Space* sp = heap_region_containing(addr);
  return sp->block_is_obj(addr);
}
1505 
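// Not tracked yet. Returning 0 reads as "a GC just happened", which
// presumably keeps interval-based explicit-GC triggers quiet.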
1506 jlong ShenandoahHeap::millis_since_last_gc() {
1507   return 0;
1508 }
1509 
1510 void ShenandoahHeap::prepare_for_verify() {
1511   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
1512     ensure_parsability(false);
1513   }
1514 }
1515 
1516 void ShenandoahHeap::print_gc_threads_on(outputStream* st) const {
1517   workers()->print_worker_threads_on(st);
1518   conc_workers()->print_worker_threads_on(st);
1519 }
1520 
1521 void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
1522   workers()->threads_do(tcl);
1523   conc_workers()->threads_do(tcl);
1524 }
1525 
1526 void ShenandoahHeap::print_tracing_info() const {
1527   if (log_is_enabled(Info, gc, stats)) {
1528     ResourceMark rm;
1529     outputStream* out = Log(gc, stats)::info_stream();
1530     _shenandoah_policy->print_tracing_info(out);
1531   }
1532 }
1533 
1534 class ShenandoahVerifyRootsClosure: public ExtendedOopClosure {
1535 private:
1536   ShenandoahHeap*  _heap;
1537   VerifyOption     _vo;
1538   bool             _failures;
1539 public:
1540   // _vo == UsePrevMarking -> use "prev" marking information,
1541   // _vo == UseNextMarking -> use "next" marking information,
1542   // _vo == UseMarkWord    -> use mark word from object header.
1543   ShenandoahVerifyRootsClosure(VerifyOption vo) :
1544     _heap(ShenandoahHeap::heap()),
1545     _vo(vo),
1546     _failures(false) { }
1547 
1548   bool failures() { return _failures; }
1549 
1550 private:
1551   template <class T>
1552   inline void do_oop_work(T* p) {
1553     oop obj = oopDesc::load_decode_heap_oop(p);
    if (! oopDesc::is_null(obj) && ! obj->is_oop()) {
      _failures = true;
      // Just for debugging.
      tty->print_cr("Root location "PTR_FORMAT" verified "PTR_FORMAT,
                    p2i(p), p2i((void*) obj));
    }
    guarantee(obj->is_oop_or_null(), "is oop or null");
1562   }
1563 
1564 public:
1565   void do_oop(oop* p)       {
1566     do_oop_work(p);
1567   }
1568 
1569   void do_oop(narrowOop* p) {
1570     do_oop_work(p);
1571   }
1572 
1573 };
1574 
1575 class ShenandoahVerifyHeapClosure: public ObjectClosure {
1576 private:
1577   ShenandoahVerifyRootsClosure _rootsCl;
1578 public:
1579   ShenandoahVerifyHeapClosure(ShenandoahVerifyRootsClosure rc) :
    _rootsCl(rc) {}
1581 
1582   void do_object(oop p) {
1583     _rootsCl.do_oop(&p);
1584   }
1585 };
1586 
1587 class ShenandoahVerifyKlassClosure: public KlassClosure {
1588   OopClosure *_oop_closure;
1589  public:
1590   ShenandoahVerifyKlassClosure(OopClosure* cl) : _oop_closure(cl) {}
1591   void do_klass(Klass* k) {
1592     k->oops_do(_oop_closure);
1593   }
1594 };
1595 
1596 void ShenandoahHeap::verify(VerifyOption vo) {
1597   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
1599     ShenandoahVerifyRootsClosure rootsCl(vo);
1600 
1601     assert(Thread::current()->is_VM_thread(),
1602            "Expected to be executed serially by the VM thread at this point");
1603 
1604     roots_iterate(&rootsCl);
1605 
1606     bool failures = rootsCl.failures();
1607     log_trace(gc)("verify failures: %s", BOOL_TO_STR(failures));
1608 
1609     ShenandoahVerifyHeapClosure heapCl(rootsCl);
1610 
1611     object_iterate(&heapCl);
1612     // TODO: Implement rest of it.
1613   } else {
1614     tty->print("(SKIPPING roots, heapRegions, remset) ");
1615   }
1616 }

size_t ShenandoahHeap::tlab_capacity(Thread* thr) const {
1618   return _free_regions->capacity();
1619 }
1620 
1621 class ShenandoahIterateObjectClosureRegionClosure: public ShenandoahHeapRegionClosure {
1622   ObjectClosure* _cl;
1623 public:
1624   ShenandoahIterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {}
1625   bool doHeapRegion(ShenandoahHeapRegion* r) {
1626     ShenandoahHeap::heap()->marked_object_iterate(r, _cl);
1627     return false;
1628   }
1629 };
1630 
1631 void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
1632   ShenandoahIterateObjectClosureRegionClosure blk(cl);
1633   heap_region_iterate(&blk, false, true);
1634 }
1635 
1636 class ShenandoahSafeObjectIterateAdjustPtrsClosure : public MetadataAwareOopClosure {
1637 private:
1638   ShenandoahHeap* _heap;
1639 
1640 public:
1641   ShenandoahSafeObjectIterateAdjustPtrsClosure() : _heap(ShenandoahHeap::heap()) {}
1642 
1643 private:
1644   template <class T>
1645   inline void do_oop_work(T* p) {
1646     T o = oopDesc::load_heap_oop(p);
1647     if (!oopDesc::is_null(o)) {
1648       oop obj = oopDesc::decode_heap_oop_not_null(o);
1649       oopDesc::encode_store_heap_oop(p, BrooksPointer::forwardee(obj));
1650     }
1651   }
1652 public:
1653   void do_oop(oop* p) {
1654     do_oop_work(p);
1655   }
1656   void do_oop(narrowOop* p) {
1657     do_oop_work(p);
1658   }
1659 };
1660 
1661 class ShenandoahSafeObjectIterateAndUpdate : public ObjectClosure {
1662 private:
1663   ObjectClosure* _cl;
1664 public:
1665   ShenandoahSafeObjectIterateAndUpdate(ObjectClosure *cl) : _cl(cl) {}
1666 
1667   virtual void do_object(oop obj) {
1668     assert (oopDesc::unsafe_equals(obj, BrooksPointer::forwardee(obj)),
1669             "avoid double-counting: only non-forwarded objects here");
1670 
1671     // Fix up the ptrs.
1672     ShenandoahSafeObjectIterateAdjustPtrsClosure adjust_ptrs;
1673     obj->oop_iterate(&adjust_ptrs);
1674 
    // Can report the object to the wrapped closure now:
1676     _cl->do_object(obj);
1677   }
1678 };
1679 
1680 void ShenandoahHeap::safe_object_iterate(ObjectClosure* cl) {
1681   assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1682 
  // Safe iteration must present only objects with correct references.
  // This is why we skip dirty regions that hold stale copies of objects,
  // and fix up the pointers in the returned objects.
1686 
1687   ShenandoahSafeObjectIterateAndUpdate safe_cl(cl);
1688   ShenandoahIterateObjectClosureRegionClosure blk(&safe_cl);
1689   heap_region_iterate(&blk,
1690                       /* skip_dirty_regions = */ true,
                      /* skip_humongous_continuation = */ true);
1692 
1693   _need_update_refs = false; // already updated the references
1694 }
1695 
1696 // Apply blk->doHeapRegion() on all committed regions in address order,
1697 // terminating the iteration early if doHeapRegion() returns true.
1698 void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk, bool skip_dirty_regions, bool skip_humongous_continuation) const {
1699   for (size_t i = 0; i < _num_regions; i++) {
1700     ShenandoahHeapRegion* current  = _ordered_regions->get(i);
1701     if (skip_humongous_continuation && current->is_humongous_continuation()) {
1702       continue;
1703     }
1704     if (skip_dirty_regions && in_collection_set(current)) {
1705       continue;
1706     }
1707     if (blk->doHeapRegion(current)) {
1708       return;
1709     }
1710   }
1711 }
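
// A minimal usage sketch (hypothetical closure, for illustration only):
//
//   class CountHumongousClosure : public ShenandoahHeapRegionClosure {
//   public:
//     size_t _count;
//     CountHumongousClosure() : _count(0) {}
//     bool doHeapRegion(ShenandoahHeapRegion* r) {
//       if (r->is_humongous()) _count++;
//       return false; // false: keep iterating
//     }
//   };
//
//   CountHumongousClosure count_cl;
//   ShenandoahHeap::heap()->heap_region_iterate(&count_cl);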
1712 
1713 class ClearLivenessClosure : public ShenandoahHeapRegionClosure {
1714   ShenandoahHeap* sh;
1715 public:
1716   ClearLivenessClosure(ShenandoahHeap* heap) : sh(heap) { }
1717 
1718   bool doHeapRegion(ShenandoahHeapRegion* r) {
1719     r->clear_live_data();
1720     sh->set_next_top_at_mark_start(r->bottom(), r->top());
1721     return false;
1722   }
1723 };
1724 
void ShenandoahHeap::start_concurrent_marking() {
1728   shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::accumulate_stats);
1729   accumulate_statistics_all_tlabs();
1730   shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::accumulate_stats);
1731 
1732   set_concurrent_mark_in_progress(true);
1733   // We need to reset all TLABs because we'd lose marks on all objects allocated in them.
1734   if (UseTLAB) {
1735     shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::make_parsable);
1736     ensure_parsability(true);
1737     shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::make_parsable);
1738   }
1739 
1740   _shenandoah_policy->record_bytes_allocated(_bytes_allocated_since_cm);
1741   _used_start_gc = used();
1742 
1743 #ifdef ASSERT
1744   if (ShenandoahDumpHeapBeforeConcurrentMark) {
1745     ensure_parsability(false);
1746     print_all_refs("pre-mark");
1747   }
1748 #endif
1749 
1750   shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::clear_liveness);
1751   ClearLivenessClosure clc(this);
1752   heap_region_iterate(&clc);
1753   shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::clear_liveness);
1754 
1759   // Make above changes visible to worker threads
1760   OrderAccess::fence();
1761 
1762   shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::scan_roots);
1763   concurrentMark()->init_mark_roots();
1764   shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::scan_roots);
1765 
1767 }
1768 
class VerifyAfterEvacuationClosure : public ExtendedOopClosure {
private:
  ShenandoahHeap* _sh;

public:
  VerifyAfterEvacuationClosure() : _sh(ShenandoahHeap::heap()) {}
1775 
1776   template<class T> void do_oop_nv(T* p) {
1777     T heap_oop = oopDesc::load_heap_oop(p);
1778     if (!oopDesc::is_null(heap_oop)) {
1779       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
1780       guarantee(_sh->in_collection_set(obj) == (! oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj))),
                "forwarded objects can only exist in dirty (from-space) regions; is_dirty: %s, is_forwarded: %s, obj-klass: %s, marked: %s",
1782                 BOOL_TO_STR(_sh->in_collection_set(obj)),
1783                 BOOL_TO_STR(! oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj))),
1784                 obj->klass()->external_name(),
1785                 BOOL_TO_STR(_sh->is_marked_complete(obj))
1786                 );
1787       obj = oopDesc::bs()->read_barrier(obj);
1788       guarantee(! _sh->in_collection_set(obj), "forwarded oops must not point to dirty regions");
1789       guarantee(obj->is_oop(), "is_oop");
1790       guarantee(Metaspace::contains(obj->klass()), "klass pointer must go to metaspace");
1791     }
1792   }
1793 
1794   void do_oop(oop* p)       { do_oop_nv(p); }
1795   void do_oop(narrowOop* p) { do_oop_nv(p); }
1797 };
1798 
void ShenandoahHeap::verify_heap_after_evacuation() {
  verify_heap_size_consistency();

  ensure_parsability(false);

  VerifyAfterEvacuationClosure cl;
  roots_iterate(&cl);

  ObjectToOopClosure objs(&cl);
  object_iterate(&objs);
}
1812 
1813 class VerifyRegionsAfterUpdateRefsClosure : public ShenandoahHeapRegionClosure {
1814 public:
1815   bool doHeapRegion(ShenandoahHeapRegion* r) {
1816     assert(! ShenandoahHeap::heap()->in_collection_set(r), "no region must be in collection set");
1817     return false;
1818   }
1819 };
1820 
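// At the end of a successful marking cycle the roles of the two bitmaps flip:
// the freshly built "next" marking data becomes the "complete" data that the
// rest of the cycle reads, and the old "complete" structures are recycled as
// the "next" ones. The top-at-mark-start arrays (both the biased pointers and
// their backing storage) travel with their bitmaps.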
1821 void ShenandoahHeap::swap_mark_bitmaps() {
1822   // Swap bitmaps.
1823   CMBitMap* tmp1 = _complete_mark_bit_map;
1824   _complete_mark_bit_map = _next_mark_bit_map;
1825   _next_mark_bit_map = tmp1;
1826 
1827   // Swap top-at-mark-start pointers
1828   HeapWord** tmp2 = _complete_top_at_mark_starts;
1829   _complete_top_at_mark_starts = _next_top_at_mark_starts;
1830   _next_top_at_mark_starts = tmp2;
1831 
1832   HeapWord** tmp3 = _complete_top_at_mark_starts_base;
1833   _complete_top_at_mark_starts_base = _next_top_at_mark_starts_base;
1834   _next_top_at_mark_starts_base = tmp3;
1835 }
1836 
1837 void ShenandoahHeap::stop_concurrent_marking() {
1838   assert(concurrent_mark_in_progress(), "How else could we get here?");
  if (! cancelled_concgc()) {
    // Marking completed without cancellation: the freshly built "next" marking
    // data is now authoritative, so swap the bitmaps and clear the pending
    // update-refs request.
    set_need_update_refs(false);
    swap_mark_bitmaps();
  }
1845   set_concurrent_mark_in_progress(false);
1846 
1847   if (log_is_enabled(Trace, gc, region)) {
1848     ResourceMark rm;
1849     outputStream* out = Log(gc, region)::trace_stream();
1850     print_heap_regions(out);
1851   }
1853 }
1854 
1855 void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
1856   _concurrent_mark_in_progress = in_progress ? 1 : 0;
1857   JavaThread::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
1858 }
1859 
1860 void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
1861   JavaThread::set_evacuation_in_progress_all_threads(in_progress);
1862   _evacuation_in_progress = in_progress ? 1 : 0;
1863   OrderAccess::fence();
1864 }
1865 
void ShenandoahHeap::verify_copy(oop p, oop c) {
  assert(! oopDesc::unsafe_equals(p, oopDesc::bs()->read_barrier(p)), "forwarded correctly");
  assert(oopDesc::unsafe_equals(oopDesc::bs()->read_barrier(p), c), "verify pointer is correct");
  if (p->klass() != c->klass()) {
    print_heap_regions();
  }
  assert(p->klass() == c->klass(), "verify class p-size: "INT32_FORMAT" c-size: "INT32_FORMAT, p->size(), c->size());
  assert(p->size() == c->size(), "verify size");
  // Object may have been locked between copy and verification
  //    assert(p->mark() == c->mark(), "verify mark");
  assert(oopDesc::unsafe_equals(c, oopDesc::bs()->read_barrier(c)), "verify only forwarded once");
}
1878 
1879 void ShenandoahHeap::oom_during_evacuation() {
1880   log_develop_trace(gc)("Out of memory during evacuation, cancel evacuation, schedule full GC by thread %d",
1881                         Thread::current()->osthread()->thread_id());
1882 
1883   // We ran out of memory during evacuation. Cancel evacuation, and schedule a full-GC.
1884   collector_policy()->set_should_clear_all_soft_refs(true);
1885   concurrent_thread()->try_set_full_gc();
1886   cancel_concgc(_oom_evacuation);
1887 
1888   if ((! Thread::current()->is_GC_task_thread()) && (! Thread::current()->is_ConcurrentGC_thread())) {
1889     log_warning(gc)("OOM during evacuation. Let Java thread wait until evacuation finishes.");
1890     while (_evacuation_in_progress) { // wait.
1891       Thread::current()->_ParkEvent->park(1);
1892     }
1893   }
1895 }
1896 
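// TLAB allocations reserve an extra word in front of each object for the
// Brooks forwarding pointer (see oop_extra_words() below).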
1897 HeapWord* ShenandoahHeap::tlab_post_allocation_setup(HeapWord* obj) {
  // The chunk begins with the Brooks pointer word; the object proper follows it.
1899   HeapWord* result = obj + BrooksPointer::word_size();
1900   BrooksPointer::initialize(oop(result));
1901   return result;
1902 }
1903 
1904 uint ShenandoahHeap::oop_extra_words() {
1905   return BrooksPointer::word_size();
1906 }
1907 
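// Commits storage for num_regions additional regions and appends them to the
// region sets. Region addresses are a simple linear function of the region
// index: start = _first_region_bottom + index * region_size_in_words.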
1908 void ShenandoahHeap::grow_heap_by(size_t num_regions) {
1909   size_t base = _num_regions;
1910   ensure_new_regions(num_regions);
1911   for (size_t i = 0; i < num_regions; i++) {
1912     ShenandoahHeapRegion* new_region = new ShenandoahHeapRegion();
1913     size_t new_region_index = i + base;
1914     HeapWord* start = _first_region_bottom + (ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize) * new_region_index;
1915     new_region->initialize_heap_region(this, start, ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize, new_region_index);
1916 
1917     if (log_is_enabled(Trace, gc, region)) {
1918       ResourceMark rm;
1919       outputStream* out = Log(gc, region)::trace_stream();
1920       out->print_cr("allocating new region at index: "SIZE_FORMAT, new_region_index);
1921       new_region->print_on(out);
1922     }
1923 
1924     assert(_ordered_regions->active_regions() == new_region->region_number(), "must match");
1925     _ordered_regions->add_region(new_region);
1926     _sorted_regions->add_region(new_region);
1927     _in_cset_fast_test_base[new_region_index] = false; // Not in cset
1928     _next_top_at_mark_starts_base[new_region_index] = new_region->bottom();
1929     _complete_top_at_mark_starts_base[new_region_index] = new_region->bottom();
1930 
1931     _free_regions->add_region(new_region);
1932   }
1933 }
1934 
void ShenandoahHeap::ensure_new_regions(size_t new_regions) {
  size_t num_regions = _num_regions;
  size_t new_num_regions = num_regions + new_regions;
  assert(new_num_regions <= _max_regions, "we checked this earlier");

  size_t expand_size = new_regions * ShenandoahHeapRegion::RegionSizeBytes;
  log_trace(gc, region)("expanding storage by "SIZE_FORMAT_HEX" bytes, for "SIZE_FORMAT" new regions", expand_size, new_regions);
  bool success = _storage.expand_by(expand_size, ShenandoahAlwaysPreTouch);
  assert(success, "should always be able to expand by requested size");

  _num_regions = new_num_regions;
}
1949 
1950 ShenandoahForwardedIsAliveClosure::ShenandoahForwardedIsAliveClosure() :
1951   _heap(ShenandoahHeap::heap_no_check()) {
1952 }
1953 
1954 void ShenandoahForwardedIsAliveClosure::init(ShenandoahHeap* heap) {
1955   _heap = heap;
1956 }
1957 
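// Liveness query used during reference processing: resolve the object through
// its forwarding pointer first, then consult the "next" mark bitmap.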
bool ShenandoahForwardedIsAliveClosure::do_object_b(oop obj) {
  assert(_heap != NULL, "sanity");
1961   obj = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
1962 #ifdef ASSERT
1963   if (_heap->concurrent_mark_in_progress()) {
1964     assert(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)), "only query to-space");
1965   }
1966 #endif
1967   assert(!oopDesc::is_null(obj), "null");
1968   return _heap->is_marked_next(obj);
1969 }
1970 
1971 void ShenandoahHeap::ref_processing_init() {
1972   MemRegion mr = reserved_region();
1973 
1974   isAlive.init(ShenandoahHeap::heap());
1975   assert(_max_workers > 0, "Sanity");
1976 
  _ref_processor =
    new ReferenceProcessor(mr,                      // span
                           ParallelRefProcEnabled,  // mt processing
                           _max_workers,            // degree of mt processing
                           true,                    // mt discovery
                           _max_workers,            // degree of mt discovery
                           false,                   // reference discovery is not atomic
                           &isAlive);
1990 }
1991 
1992 #ifdef ASSERT
1993 void ShenandoahHeap::set_from_region_protection(bool protect) {
  for (size_t i = 0; i < _num_regions; i++) {
1995     ShenandoahHeapRegion* region = _ordered_regions->get(i);
1996     if (region != NULL && in_collection_set(region)) {
1997       if (protect) {
1998         region->memProtectionOn();
1999       } else {
2000         region->memProtectionOff();
2001       }
2002     }
2003   }
2004 }
2005 #endif
2006 
2007 size_t ShenandoahHeap::num_regions() {
2008   return _num_regions;
2009 }
2010 
2011 size_t ShenandoahHeap::max_regions() {
2012   return _max_regions;
2013 }
2014 
2015 GCTracer* ShenandoahHeap::tracer() {
2016   return shenandoahPolicy()->tracer();
2017 }
2018 
2019 size_t ShenandoahHeap::tlab_used(Thread* thread) const {
2020   return _free_regions->used();
2021 }
2022 
2023 void ShenandoahHeap::cancel_concgc(GCCause::Cause cause) {
2024   if (try_cancel_concgc()) {
2025     log_info(gc)("Cancelling concurrent GC: %s", GCCause::to_string(cause));
2026     _shenandoah_policy->report_concgc_cancelled();
2027   }
2028 }
2029 
2030 void ShenandoahHeap::cancel_concgc(ShenandoahCancelCause cause) {
2031   if (try_cancel_concgc()) {
2032     log_info(gc)("Cancelling concurrent GC: %s", cancel_cause_to_string(cause));
2033     _shenandoah_policy->report_concgc_cancelled();
2034   }
2035 }
2036 
2037 const char* ShenandoahHeap::cancel_cause_to_string(ShenandoahCancelCause cause) {
2038   switch (cause) {
2039     case _oom_evacuation:
2040       return "Out of memory for evacuation";
2041     case _vm_stop:
2042       return "Stopping VM";
2043     default:
2044       return "Unknown";
2045   }
2046 }
2047 
2048 void ShenandoahHeap::clear_cancelled_concgc() {
2049   set_cancelled_concgc(false);
2050 }
2051 
2052 uint ShenandoahHeap::max_workers() {
2053   return _max_workers;
2054 }
2055 
2056 uint ShenandoahHeap::max_parallel_workers() {
2057   return _max_parallel_workers;
2058 }

uint ShenandoahHeap::max_conc_workers() {
2060   return _max_conc_workers;
2061 }
2062 
2063 void ShenandoahHeap::stop() {
  // The shutdown sequence should be able to complete even while a GC cycle is running.
2065 
2066   // Step 1. Notify control thread that we are in shutdown.
2067   // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown.
2068   // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
2069   _concurrent_gc_thread->prepare_for_graceful_shutdown();
2070 
2071   // Step 2. Notify GC workers that we are cancelling GC.
2072   cancel_concgc(_vm_stop);
2073 
  // Step 3. Wait until the GC control thread exits normally.
2075   _concurrent_gc_thread->stop();
2076 }
2077 
void ShenandoahHeap::unlink_string_and_symbol_table(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols) {
2080   StringSymbolTableUnlinkTask shenandoah_unlink_task(is_alive, process_strings, process_symbols);
2081   workers()->run_task(&shenandoah_unlink_task);
2082 
2083   //  if (G1StringDedup::is_enabled()) {
2084   //    G1StringDedup::unlink(is_alive);
2085   //  }
2086 }
2087 
2088 void ShenandoahHeap::set_need_update_refs(bool need_update_refs) {
2089   _need_update_refs = need_update_refs;
2090 }
2091 
// FIXME: this should live with the heap region set code.
2093 ShenandoahHeapRegion* ShenandoahHeap::next_compaction_region(const ShenandoahHeapRegion* r) {
2094   size_t region_idx = r->region_number() + 1;
2095   ShenandoahHeapRegion* next = _ordered_regions->get(region_idx);
2096   guarantee(next->region_number() == region_idx, "region number must match");
2097   while (next->is_humongous()) {
2098     region_idx = next->region_number() + 1;
2099     next = _ordered_regions->get(region_idx);
2100     guarantee(next->region_number() == region_idx, "region number must match");
2101   }
2102   return next;
2103 }
2104 
2105 void ShenandoahHeap::set_region_in_collection_set(size_t region_index, bool b) {
2106   _in_cset_fast_test_base[region_index] = b;
2107 }
2108 
2109 ShenandoahMonitoringSupport* ShenandoahHeap::monitoring_support() {
2110   return _monitoring_support;
2111 }
2112 
2113 CMBitMap* ShenandoahHeap::complete_mark_bit_map() {
2114   return _complete_mark_bit_map;
2115 }
2116 
2117 CMBitMap* ShenandoahHeap::next_mark_bit_map() {
2118   return _next_mark_bit_map;
2119 }
2120 
2121 void ShenandoahHeap::add_free_region(ShenandoahHeapRegion* r) {
2122   _free_regions->add_region(r);
2123 }
2124 
2125 void ShenandoahHeap::clear_free_regions() {
2126   _free_regions->clear();
2127 }
2128 
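// The in-collection-set fast test is a byte-per-region table indexed by region
// number. Its address is exported so that (presumably) barrier stubs and
// JIT-compiled code can test membership with a single load.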
2129 address ShenandoahHeap::in_cset_fast_test_addr() {
2130   return (address) (ShenandoahHeap::heap()->_in_cset_fast_test);
2131 }
2132 
2133 address ShenandoahHeap::cancelled_concgc_addr() {
2134   return (address) &(ShenandoahHeap::heap()->_cancelled_concgc);
2135 }
2136 
2137 void ShenandoahHeap::clear_cset_fast_test() {
2138   assert(_in_cset_fast_test_base != NULL, "sanity");
2139   memset(_in_cset_fast_test_base, false,
2140          _in_cset_fast_test_length * sizeof(bool));
2141 }
2142 
2143 size_t ShenandoahHeap::conservative_max_heap_alignment() {
2144   return HeapRegionBounds::max_size();
2145 }
2146 
2147 size_t ShenandoahHeap::bytes_allocated_since_cm() {
2148   return _bytes_allocated_since_cm;
2149 }
2150 
2151 void ShenandoahHeap::set_bytes_allocated_since_cm(size_t bytes) {
2152   _bytes_allocated_since_cm = bytes;
2153 }
2154 
2155 size_t ShenandoahHeap::max_allocated_gc() {
2156   return _max_allocated_gc;
2157 }
2158 
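// Top-at-mark-start (TAMS) accessors. The arrays are biased so that a region's
// base address shifted right by RegionSizeShift indexes its slot directly,
// without subtracting the heap base on every access (the unbiased storage is
// kept in the corresponding *_base arrays). Illustrative sketch with
// hypothetical numbers (assuming 1 MB regions, i.e. RegionSizeShift == 20):
//
//   heap base   = 0x80000000  ->  0x80000000 >> 20 = 0x800
//   region base = 0x80300000  ->  0x80300000 >> 20 = 0x803
//   biased array = base_array - 0x800, so biased[0x803] == base_array[3]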
2159 void ShenandoahHeap::set_next_top_at_mark_start(HeapWord* region_base, HeapWord* addr) {
2160   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::RegionSizeShift;
2161   _next_top_at_mark_starts[index] = addr;
2162 }
2163 
2164 HeapWord* ShenandoahHeap::next_top_at_mark_start(HeapWord* region_base) {
2165   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::RegionSizeShift;
2166   return _next_top_at_mark_starts[index];
2167 }
2168 
2169 void ShenandoahHeap::set_complete_top_at_mark_start(HeapWord* region_base, HeapWord* addr) {
2170   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::RegionSizeShift;
2171   _complete_top_at_mark_starts[index] = addr;
2172 }
2173 
2174 HeapWord* ShenandoahHeap::complete_top_at_mark_start(HeapWord* region_base) {
2175   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::RegionSizeShift;
2176   return _complete_top_at_mark_starts[index];
2177 }
2178 
2179 void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
2180   _full_gc_in_progress = in_progress;
2181 }
2182 
2183 bool ShenandoahHeap::is_full_gc_in_progress() const {
2184   return _full_gc_in_progress;
2185 }
2186 
2187 class NMethodOopInitializer : public OopClosure {
2188 private:
2189   ShenandoahHeap* _heap;
2190 public:
2191   NMethodOopInitializer() : _heap(ShenandoahHeap::heap()) {
2192   }
2193 
2194 private:
2195   template <class T>
2196   inline void do_oop_work(T* p) {
2197     T o = oopDesc::load_heap_oop(p);
2198     if (! oopDesc::is_null(o)) {
2199       oop obj1 = oopDesc::decode_heap_oop_not_null(o);
2200       oop obj2 = oopDesc::bs()->write_barrier(obj1);
2201       if (! oopDesc::unsafe_equals(obj1, obj2)) {
2202         oopDesc::encode_store_heap_oop(p, obj2);
2203       }
2204     }
2205   }
2206 
2207 public:
2208   void do_oop(oop* o) {
2209     do_oop_work(o);
2210   }
2211   void do_oop(narrowOop* o) {
2212     do_oop_work(o);
2213   }
2214 };
2215 
2216 void ShenandoahHeap::register_nmethod(nmethod* nm) {
2217   NMethodOopInitializer init;
2218   nm->oops_do(&init);
2219   nm->fix_oop_relocations();
2220 }
2221 
2222 void ShenandoahHeap::unregister_nmethod(nmethod* nm) {
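  // Nothing to do.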
2223 }
2224 
2225 void ShenandoahHeap::pin_object(oop o) {
2226   heap_region_containing(o)->pin();
2227 }
2228 
2229 void ShenandoahHeap::unpin_object(oop o) {
2230   heap_region_containing(o)->unpin();
2231 }
2232 
2234 GCTimer* ShenandoahHeap::gc_timer() const {
2235   return _gc_timer;
2236 }
2237 
2238 class RecordAllRefsOopClosure: public ExtendedOopClosure {
private:
  int  _x;
  int* _matrix;
  int  _num_regions;
  oop  _p;

public:
  RecordAllRefsOopClosure(int* matrix, int x, size_t num_regions, oop p) :
    _x(x), _matrix(matrix), _num_regions((int) num_regions), _p(p) {}
2248 
2249   template <class T>
2250   void do_oop_work(T* p) {
2251     oop o = oopDesc::load_decode_heap_oop(p);
2252     if (o != NULL) {
2253       if (ShenandoahHeap::heap()->is_in(o) && o->is_oop() ) {
2254         int y = ShenandoahHeap::heap()->heap_region_containing(o)->region_number();
2255         _matrix[_x * _num_regions + y]++;
2256       }
2257     }
2258   }
2259   void do_oop(oop* p) {
2260     do_oop_work(p);
2261   }
2262 
2263   void do_oop(narrowOop* p) {
2264     do_oop_work(p);
2265   }
2266 
2267 };
2268 
2269 class RecordAllRefsObjectClosure : public ObjectClosure {
2270   int *_matrix;
2271   size_t _num_regions;
2272 
2273 public:
2274   RecordAllRefsObjectClosure(int *matrix, size_t num_regions) :
2275     _matrix(matrix), _num_regions(num_regions) {}
2276 
2277   void do_object(oop p) {
2278     if (ShenandoahHeap::heap()->is_in(p) && ShenandoahHeap::heap()->is_marked_next(p)  && p->is_oop()) {
2279       int x = ShenandoahHeap::heap()->heap_region_containing(p)->region_number();
2280       RecordAllRefsOopClosure cl(_matrix, x, _num_regions, p);
2281       p->oop_iterate(&cl);
2282     }
2283   }
2284 };
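
// The connection matrix is a flattened num_regions x num_regions array:
// connections[from * num_regions + to] counts references found in region
// "from" that point into region "to". Roots are folded into row 0.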
2285 void ShenandoahHeap::calculate_matrix(int* connections) {
2286   log_develop_trace(gc)("calculating matrix");
2287   ensure_parsability(false);
  int num = (int) num_regions();
2289 
2290   for (int i = 0; i < num; i++) {
2291     for (int j = 0; j < num; j++) {
2292       connections[i * num + j] = 0;
2293     }
2294   }
2295 
2296   RecordAllRefsOopClosure cl(connections, 0, num, NULL);
2297   roots_iterate(&cl);
2298 
2299   RecordAllRefsObjectClosure cl2(connections, num);
2300   object_iterate(&cl2);
2302 }
2303 
2304 void ShenandoahHeap::print_matrix(int* connections) {
  int num = (int) num_regions();
2306   int cs_regions = 0;
2307   int referenced = 0;
2308 
2309   for (int i = 0; i < num; i++) {
2310     size_t liveData = ShenandoahHeap::heap()->regions()->get(i)->get_live_data();
2311 
2312     int numReferencedRegions = 0;
2313     int numReferencedByRegions = 0;
2314 
    for (int j = 0; j < num; j++) {
      if (connections[i * num + j] > 0)
        numReferencedRegions++;

      if (connections[j * num + i] > 0)
        numReferencedByRegions++;
    }

    cs_regions++;
    referenced += numReferencedByRegions;
2325 
    if (liveData > 0) {
2327       tty->print("Region %d is referenced by %d regions {",
2328                  i, numReferencedByRegions);
2329       int col_count = 0;
2330       for (int j = 0; j < num; j++) {
        int count = connections[j * num + i];
        if (count > 0) {
          col_count++;
          if ((col_count % 10) == 0)
            tty->print("\n");
          tty->print("%d(%d), ", j, count);
2337         }
2338       }
2339       tty->print("} \n");
2340     }
2341   }
2342 
  double avg = (double) referenced / (double) cs_regions;
  tty->print("Average number of regions scanned per region = %f\n", avg);
2345 }
2346 
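// Sums reclaimable garbage across regular regions; humongous, pinned, and
// collection-set regions are excluded, since their space is not (or not yet)
// reclaimable by the regular cycle.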
2347 class ShenandoahCountGarbageClosure : public ShenandoahHeapRegionClosure {
2348 private:
2349   size_t _garbage;
2350 public:
2351   ShenandoahCountGarbageClosure() : _garbage(0) {
2352   }
2353 
2354   bool doHeapRegion(ShenandoahHeapRegion* r) {
2355     if (! r->is_humongous() && ! r->is_pinned() && ! r->in_collection_set()) {
2356       _garbage += r->garbage();
2357     }
2358     return false;
2359   }
2360 
2361   size_t garbage() {
2362     return _garbage;
2363   }
2364 };
2365 
2366 size_t ShenandoahHeap::garbage() {
2367   ShenandoahCountGarbageClosure cl;
2368   heap_region_iterate(&cl);
2369   return cl.garbage();
2370 }
2371 
2372 #ifdef ASSERT
2373 void ShenandoahHeap::assert_heaplock_owned_by_current_thread() {
2374   assert(_heap_lock == locked, "must be locked");
2375   assert(_heap_lock_owner == Thread::current(), "must be owned by current thread");
2376 }
2377 #endif