/*
 * Copyright (c) 2013, 2015, Red Hat, Inc. and/or its affiliates.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "memory/allocation.hpp"

#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/parallelCleaning.hpp"

#include "gc/shenandoah/brooksPointer.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc/shenandoah/shenandoahConcurrentThread.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahHumongous.hpp"
#include "gc/shenandoah/shenandoahMarkCompact.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.hpp"
#include "gc/shenandoah/vm_operations_shenandoah.hpp"

#include "runtime/vmThread.hpp"
#include "services/mallocTracker.hpp"

const char* ShenandoahHeap::name() const {
  return "Shenandoah";
}

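// Debug helper: dumps the raw contents of every word in [start, end).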
void ShenandoahHeap::print_heap_locations(HeapWord* start, HeapWord* end) {
  for (HeapWord* cur = start; cur < end; cur++) {
    tty->print_cr(PTR_FORMAT " : " PTR_FORMAT, p2i(cur), p2i(*((HeapWord**) cur)));
  }
}

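// Debug closure: prints every visited heap region to the given stream.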
class PrintHeapRegionsClosure : public ShenandoahHeapRegionClosure {
private:
  outputStream* _st;
public:
  PrintHeapRegionsClosure() : _st(tty) {}
  PrintHeapRegionsClosure(outputStream* st) : _st(st) {}

  bool doHeapRegion(ShenandoahHeapRegion* r) {
    r->print_on(_st);
    return false;
  }
};

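// Pre-touches all heap regions and the corresponding chunks of both mark
// bitmaps in parallel. On NUMA systems this faults the pages in from the
// worker threads that will later use them, instead of from the single
// initializing thread.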
class ShenandoahPretouchTask : public AbstractGangTask {
private:
  ShenandoahHeapRegionSet* _regions;
  const size_t _bitmap_size;
  const size_t _page_size;
  char* _bitmap0_base;
  char* _bitmap1_base;
public:
  ShenandoahPretouchTask(ShenandoahHeapRegionSet* regions,
                         char* bitmap0_base, char* bitmap1_base, size_t bitmap_size,
                         size_t page_size) :
    AbstractGangTask("Shenandoah PreTouch",
                     Universe::is_fully_initialized() ? GCId::current_raw() :
                                                        // During VM initialization there is
                                                        // no GC cycle that this task can be
                                                        // associated with.
                                                        GCId::undefined()),
    _regions(regions),
    _bitmap_size(bitmap_size),
    _page_size(page_size),
    _bitmap0_base(bitmap0_base),
    _bitmap1_base(bitmap1_base) {
    _regions->clear_current_index();
  }

  virtual void work(uint worker_id) {
    ShenandoahHeapRegion* r = _regions->claim_next();
    while (r != NULL) {
      log_trace(gc, heap)("Pretouch region " SIZE_FORMAT ": " PTR_FORMAT " -> " PTR_FORMAT,
                          r->region_number(), p2i(r->bottom()), p2i(r->end()));
      os::pretouch_memory(r->bottom(), r->end(), _page_size);

      size_t start = r->region_number()       * ShenandoahHeapRegion::RegionSizeBytes / CMBitMap::heap_map_factor();
      size_t end   = (r->region_number() + 1) * ShenandoahHeapRegion::RegionSizeBytes / CMBitMap::heap_map_factor();
      assert (end <= _bitmap_size, "end is sane: " SIZE_FORMAT " <= " SIZE_FORMAT, end, _bitmap_size);

      log_trace(gc, heap)("Pretouch bitmap under region " SIZE_FORMAT ": " PTR_FORMAT " -> " PTR_FORMAT,
                          r->region_number(), p2i(_bitmap0_base + start), p2i(_bitmap0_base + end));
      os::pretouch_memory(_bitmap0_base + start, _bitmap0_base + end, _page_size);

      log_trace(gc, heap)("Pretouch bitmap under region " SIZE_FORMAT ": " PTR_FORMAT " -> " PTR_FORMAT,
                          r->region_number(), p2i(_bitmap1_base + start), p2i(_bitmap1_base + end));
      os::pretouch_memory(_bitmap1_base + start, _bitmap1_base + end, _page_size);

      r = _regions->claim_next();
    }
  }
};

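// Heap initialization: reserve and commit the heap storage and both mark
// bitmaps, build the region sets and the heap-base-biased lookup arrays,
// initialize the SATB queue set, and optionally pre-touch everything in
// parallel.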
jint ShenandoahHeap::initialize() {
  CollectedHeap::pre_initialize();

  size_t init_byte_size = collector_policy()->initial_heap_byte_size();
  size_t max_byte_size = collector_policy()->max_heap_byte_size();

  Universe::check_alignment(max_byte_size,
                            ShenandoahHeapRegion::RegionSizeBytes,
                            "shenandoah heap");
  Universe::check_alignment(init_byte_size,
                            ShenandoahHeapRegion::RegionSizeBytes,
                            "shenandoah heap");

  ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
                                                 Arguments::conservative_max_heap_alignment());
  initialize_reserved_region((HeapWord*) heap_rs.base(), (HeapWord*) (heap_rs.base() + heap_rs.size()));

  set_barrier_set(new ShenandoahBarrierSet(this));
  ReservedSpace pgc_rs = heap_rs.first_part(max_byte_size);
  _storage.initialize(pgc_rs, init_byte_size);

  _num_regions = init_byte_size / ShenandoahHeapRegion::RegionSizeBytes;
  _max_regions = max_byte_size / ShenandoahHeapRegion::RegionSizeBytes;
  _initialSize = _num_regions * ShenandoahHeapRegion::RegionSizeBytes;
  size_t regionSizeWords = ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize;
  assert(init_byte_size == _initialSize, "tautology");
  _ordered_regions = new ShenandoahHeapRegionSet(_max_regions);
  _collection_set = new ShenandoahCollectionSet(_max_regions);
  _free_regions = new ShenandoahFreeSet(_max_regions);

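  // The lookup arrays below are biased by the heap base: subtracting
  // (heap_base >> RegionSizeShift) from the array base lets them be indexed
  // directly with (addr >> RegionSizeShift) for any heap address addr,
  // without first converting the address to a region index.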
  // Initialize fast collection set test structure.
  _in_cset_fast_test_length = _max_regions;
  _in_cset_fast_test_base =
                   NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length, mtGC);
  _in_cset_fast_test = _in_cset_fast_test_base -
               ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::RegionSizeShift);

  _next_top_at_mark_starts_base =
                   NEW_C_HEAP_ARRAY(HeapWord*, _max_regions, mtGC);
  _next_top_at_mark_starts = _next_top_at_mark_starts_base -
               ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::RegionSizeShift);

  _complete_top_at_mark_starts_base =
                   NEW_C_HEAP_ARRAY(HeapWord*, _max_regions, mtGC);
  _complete_top_at_mark_starts = _complete_top_at_mark_starts_base -
               ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::RegionSizeShift);

  size_t i = 0;
  for (i = 0; i < _num_regions; i++) {
    _in_cset_fast_test_base[i] = false; // Not in cset
    HeapWord* bottom = (HeapWord*) pgc_rs.base() + regionSizeWords * i;
    _complete_top_at_mark_starts_base[i] = bottom;
    _next_top_at_mark_starts_base[i] = bottom;
  }

  {
    ShenandoahHeapLock lock(this);
    for (i = 0; i < _num_regions; i++) {
      ShenandoahHeapRegion* current = new ShenandoahHeapRegion(this, (HeapWord*) pgc_rs.base() +
                                                               regionSizeWords * i, regionSizeWords, i);
      _free_regions->add_region(current);
      _ordered_regions->add_region(current);
    }
  }
  assert(((size_t) _ordered_regions->active_regions()) == _num_regions, "");
  _first_region = _ordered_regions->get(0);
  _first_region_bottom = _first_region->bottom();
  assert((((size_t) _first_region_bottom) &
          (ShenandoahHeapRegion::RegionSizeBytes - 1)) == 0,
         "misaligned heap: " PTR_FORMAT, p2i(_first_region_bottom));

  _numAllocs = 0;

  if (log_is_enabled(Trace, gc, region)) {
    ResourceMark rm;
    outputStream* out = Log(gc, region)::trace_stream();
    log_trace(gc, region)("All Regions");
    _ordered_regions->print(out);
    log_trace(gc, region)("Free Regions");
    _free_regions->print(out);
  }

  // The call below uses the SATB machinery that lives in G1, but probably
  // belongs in a shared location.
  JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
                                               SATB_Q_FL_lock,
                                               20 /* G1SATBProcessCompletedThreshold */,
                                               Shared_SATB_Q_lock);

  // Reserve space for the complete ("prev") and next mark bitmaps.
  size_t bitmap_size = CMBitMap::compute_size(heap_rs.size());
  MemRegion heap_region = MemRegion((HeapWord*) heap_rs.base(), heap_rs.size() / HeapWordSize);

  size_t page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();

  ReservedSpace bitmap0(bitmap_size, page_size);
  os::commit_memory_or_exit(bitmap0.base(), bitmap0.size(), false, "couldn't allocate mark bitmap");
  MemTracker::record_virtual_memory_type(bitmap0.base(), mtGC);
  MemRegion bitmap_region0 = MemRegion((HeapWord*) bitmap0.base(), bitmap0.size() / HeapWordSize);

  ReservedSpace bitmap1(bitmap_size, page_size);
  os::commit_memory_or_exit(bitmap1.base(), bitmap1.size(), false, "couldn't allocate mark bitmap");
  MemTracker::record_virtual_memory_type(bitmap1.base(), mtGC);
  MemRegion bitmap_region1 = MemRegion((HeapWord*) bitmap1.base(), bitmap1.size() / HeapWordSize);

  if (ShenandoahAlwaysPreTouch) {
    assert (!AlwaysPreTouch, "Should have been overridden");

    // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
    // before initialize() below zeroes it with the initializing thread. For any given region,
    // we touch the region and the corresponding bitmaps from the same thread.

    log_info(gc, heap)("Parallel pretouch " SIZE_FORMAT " regions with " SIZE_FORMAT " byte pages",
                       _ordered_regions->count(), page_size);
    ShenandoahPretouchTask cl(_ordered_regions, bitmap0.base(), bitmap1.base(), bitmap_size, page_size);
    _workers->run_task(&cl);
  }

  _mark_bit_map0.initialize(heap_region, bitmap_region0);
  _complete_mark_bit_map = &_mark_bit_map0;

  _mark_bit_map1.initialize(heap_region, bitmap_region1);
  _next_mark_bit_map = &_mark_bit_map1;

  _monitoring_support = new ShenandoahMonitoringSupport(this);

  _concurrent_gc_thread = new ShenandoahConcurrentThread();

  ShenandoahMarkCompact::initialize();

  return JNI_OK;
}

ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
  CollectedHeap(),
  _shenandoah_policy(policy),
  _concurrent_mark_in_progress(0),
  _evacuation_in_progress(0),
  _full_gc_in_progress(false),
  _free_regions(NULL),
  _collection_set(NULL),
  _bytes_allocated_since_cm(0),
  _bytes_allocated_during_cm(0),
  _max_allocated_gc(0),
  _allocated_last_gc(0),
  _used_start_gc(0),
  _max_workers(MAX2(ConcGCThreads, ParallelGCThreads)),
  _ref_processor(NULL),
  _in_cset_fast_test(NULL),
  _in_cset_fast_test_base(NULL),
  _next_top_at_mark_starts(NULL),
  _next_top_at_mark_starts_base(NULL),
  _complete_top_at_mark_starts(NULL),
  _complete_top_at_mark_starts_base(NULL),
  _mark_bit_map0(),
  _mark_bit_map1(),
  _cancelled_concgc(false),
  _need_update_refs(false),
  _need_reset_bitmaps(false),
  _heap_lock(0),
#ifdef ASSERT
  _heap_lock_owner(NULL),
#endif
  _gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer())
{
  log_info(gc, init)("Parallel GC threads: " UINT32_FORMAT, ParallelGCThreads);
  log_info(gc, init)("Concurrent GC threads: " UINT32_FORMAT, ConcGCThreads);
  log_info(gc, init)("Parallel reference processing enabled: %s", BOOL_TO_STR(ParallelRefProcEnabled));

  _scm = new ShenandoahConcurrentMark();
  _used = 0;

  _max_workers = MAX2(_max_workers, 1U);
  _workers = new ShenandoahWorkGang("Shenandoah GC Threads", _max_workers,
                            /* are_GC_task_threads */ true,
                            /* are_ConcurrentGC_threads */ false);
  if (_workers == NULL) {
    vm_exit_during_initialization("Failed necessary allocation.");
  } else {
    _workers->initialize_workers();
  }
}

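// Clears the "next" mark bitmap in parallel. Only the range from the region
// bottom up to next_top_at_mark_start can contain stale marks, so only that
// range is cleared.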
class ResetNextBitmapTask : public AbstractGangTask {
private:
  ShenandoahHeapRegionSet* _regions;

public:
  ResetNextBitmapTask(ShenandoahHeapRegionSet* regions) :
    AbstractGangTask("Parallel Reset Bitmap Task"),
    _regions(regions) {
    _regions->clear_current_index();
  }

  void work(uint worker_id) {
    ShenandoahHeapRegion* region = _regions->claim_next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    while (region != NULL) {
      HeapWord* bottom = region->bottom();
      HeapWord* top = heap->next_top_at_mark_start(region->bottom());
      if (top > bottom) {
        heap->next_mark_bit_map()->clear_range_large(MemRegion(bottom, top));
      }
      region = _regions->claim_next();
    }
  }
};

void ShenandoahHeap::reset_next_mark_bitmap(WorkGang* workers) {
  ResetNextBitmapTask task = ResetNextBitmapTask(_ordered_regions);
  workers->run_task(&task);
}

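// Same as ResetNextBitmapTask, but clears the "complete" mark bitmap.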
class ResetCompleteBitmapTask : public AbstractGangTask {
private:
  ShenandoahHeapRegionSet* _regions;

public:
  ResetCompleteBitmapTask(ShenandoahHeapRegionSet* regions) :
    AbstractGangTask("Parallel Reset Bitmap Task"),
    _regions(regions) {
    _regions->clear_current_index();
  }

  void work(uint worker_id) {
    ShenandoahHeapRegion* region = _regions->claim_next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    while (region != NULL) {
      HeapWord* bottom = region->bottom();
      HeapWord* top = heap->complete_top_at_mark_start(region->bottom());
      if (top > bottom) {
        heap->complete_mark_bit_map()->clear_range_large(MemRegion(bottom, top));
      }
      region = _regions->claim_next();
    }
  }
};

void ShenandoahHeap::reset_complete_mark_bitmap(WorkGang* workers) {
  ResetCompleteBitmapTask task = ResetCompleteBitmapTask(_ordered_regions);
  workers->run_task(&task);
}

bool ShenandoahHeap::is_next_bitmap_clear() {
  HeapWord* start = _ordered_regions->bottom();
  HeapWord* end = _ordered_regions->end();
  return _next_mark_bit_map->getNextMarkedWordAddress(start, end) == end;
}

bool ShenandoahHeap::is_complete_bitmap_clear_range(HeapWord* start, HeapWord* end) {
  return _complete_mark_bit_map->getNextMarkedWordAddress(start, end) == end;
}

void ShenandoahHeap::print_on(outputStream* st) const {
  st->print("Shenandoah Heap");
  st->print(" total = " SIZE_FORMAT " K, used " SIZE_FORMAT " K ", capacity() / K, used() / K);
  st->print(" [" PTR_FORMAT ", " PTR_FORMAT ") ",
            p2i(reserved_region().start()),
            p2i(reserved_region().end()));
  st->print("Region size = " SIZE_FORMAT "K ", ShenandoahHeapRegion::RegionSizeBytes / K);
  if (_concurrent_mark_in_progress) {
    st->print("marking ");
  }
  if (_evacuation_in_progress) {
    st->print("evacuating ");
  }
  if (cancelled_concgc()) {
    st->print("cancelled ");
  }
  st->cr();

  // Adapted from VirtualSpace::print_on(), which is non-PRODUCT only.
  st->print("Virtual space:");
  if (_storage.special()) st->print(" (pinned in memory)");
  st->cr();
  st->print_cr(" - committed: " SIZE_FORMAT, _storage.committed_size());
  st->print_cr(" - reserved:  " SIZE_FORMAT, _storage.reserved_size());
  st->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  p2i(_storage.low()), p2i(_storage.high()));
  st->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  p2i(_storage.low_boundary()), p2i(_storage.high_boundary()));

  if (Verbose) {
    print_heap_regions(st);
  }
}

class InitGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    thread->gclab().initialize(true);
  }
};

void ShenandoahHeap::post_initialize() {
  if (UseTLAB) {
    // This is a very tricky point in VM lifetime. We cannot easily call Threads::threads_do
    // here, because some system threads (VMThread, WatcherThread, etc) are not yet available.
    // Their initialization should be handled separately. If we miss some threads here,
    // then any other TLAB-related activity would fail with asserts.

    InitGCLABClosure init_gclabs;
    {
      MutexLocker ml(Threads_lock);
      for (JavaThread* thread = Threads::first(); thread != NULL; thread = thread->next()) {
        init_gclabs.do_thread(thread);
      }
    }
    gc_threads_do(&init_gclabs);

    // GCLABs cannot be initialized this early during VM startup, because their
    // max_size cannot be determined yet. Instead, let the WorkGang initialize
    // the GCLAB when a new worker is created.
    _workers->set_initialize_gclab();
  }

  _scm->initialize(_max_workers);

  ref_processing_init();
}

class CalculateUsedRegionClosure : public ShenandoahHeapRegionClosure {
  size_t sum;
public:

  CalculateUsedRegionClosure() {
    sum = 0;
  }

  bool doHeapRegion(ShenandoahHeapRegion* r) {
    sum = sum + r->used();
    return false;
  }

  size_t getResult() { return sum; }
};

size_t ShenandoahHeap::calculateUsed() {
  CalculateUsedRegionClosure cl;
  heap_region_iterate(&cl);
  return cl.getResult();
}

void ShenandoahHeap::verify_heap_size_consistency() {
  assert(calculateUsed() == used(),
         "heap used size must be consistent: heap-used: " SIZE_FORMAT " regions-used: " SIZE_FORMAT, used(), calculateUsed());
}

size_t ShenandoahHeap::used() const {
  OrderAccess::acquire();
  return _used;
}

void ShenandoahHeap::increase_used(size_t bytes) {
  assert_heaplock_or_safepoint();
  _used += bytes;
}

void ShenandoahHeap::set_used(size_t bytes) {
  assert_heaplock_or_safepoint();
  _used = bytes;
}

void ShenandoahHeap::decrease_used(size_t bytes) {
  assert_heaplock_or_safepoint();
  assert(_used >= bytes, "never decrease heap size by more than we've left");
  _used -= bytes;
}

size_t ShenandoahHeap::capacity() const {
  return _num_regions * ShenandoahHeapRegion::RegionSizeBytes;
}

bool ShenandoahHeap::is_maximal_no_gc() const {
  Unimplemented();
  return true;
}

size_t ShenandoahHeap::max_capacity() const {
  return _max_regions * ShenandoahHeapRegion::RegionSizeBytes;
}

size_t ShenandoahHeap::min_capacity() const {
  return _initialSize;
}

VirtualSpace* ShenandoahHeap::storage() const {
  return (VirtualSpace*) &_storage;
}

bool ShenandoahHeap::is_in(const void* p) const {
  HeapWord* first_region_bottom = _first_region->bottom();
  HeapWord* last_region_end = first_region_bottom + (ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize) * _num_regions;
  return p >= first_region_bottom && p < last_region_end;
}

bool ShenandoahHeap::is_scavengable(const void* p) {
  return true;
}

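// GCLAB allocation slow path: if too much of the current GCLAB is still free,
// keep it and allocate the object in shared space; otherwise retire it and
// allocate a fresh GCLAB sized via gclab().compute_size().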
HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
  // Retain the GCLAB and allocate the object in shared space if
  // the amount free in the GCLAB is too large to discard.
  if (thread->gclab().free() > thread->gclab().refill_waste_limit()) {
    thread->gclab().record_slow_allocation(size);
    return NULL;
  }

  // Discard GCLAB and allocate a new one.
  // To minimize fragmentation, the last GCLAB may be smaller than the rest.
  size_t new_gclab_size = thread->gclab().compute_size(size);

  thread->gclab().clear_before_allocation();

  if (new_gclab_size == 0) {
    return NULL;
  }

  // Allocate a new GCLAB...
  HeapWord* obj = allocate_new_gclab(new_gclab_size);
  if (obj == NULL) {
    return NULL;
  }

  if (ZeroTLAB) {
    // ...and clear it.
    Copy::zero_to_words(obj, new_gclab_size);
  } else {
    // ...and zap just allocated object.
#ifdef ASSERT
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(obj + hdr_size, new_gclab_size - hdr_size, badHeapWordVal);
#endif // ASSERT
  }
  thread->gclab().fill(obj, obj + size, new_gclab_size);
  return obj;
}

HeapWord* ShenandoahHeap::allocate_new_tlab(size_t word_size) {
  return allocate_new_tlab(word_size, false);
}

HeapWord* ShenandoahHeap::allocate_new_gclab(size_t word_size) {
  return allocate_new_tlab(word_size, true);
}

HeapWord* ShenandoahHeap::allocate_new_tlab(size_t word_size, bool evacuating) {
  HeapWord* result = allocate_memory(word_size, evacuating);

  if (result != NULL) {
    assert(! in_collection_set(result), "Never allocate in dirty region");
    _bytes_allocated_since_cm += word_size * HeapWordSize;

    log_develop_trace(gc, tlab)("allocating new tlab of size " SIZE_FORMAT " at addr " PTR_FORMAT, word_size, p2i(result));
  }
  return result;
}

ShenandoahHeap* ShenandoahHeap::heap() {
  CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to ShenandoahHeap::heap()");
  assert(heap->kind() == CollectedHeap::ShenandoahHeap, "not a shenandoah heap");
  return (ShenandoahHeap*) heap;
}

ShenandoahHeap* ShenandoahHeap::heap_no_check() {
  CollectedHeap* heap = Universe::heap();
  return (ShenandoahHeap*) heap;
}

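// Common allocation path: allocate under the heap lock, and while that fails,
// grow the heap by enough regions to cover the request (up to the maximum
// heap size) and retry.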
HeapWord* ShenandoahHeap::allocate_memory_work(size_t word_size) {
  ShenandoahHeapLock heap_lock(this);

  HeapWord* result = allocate_memory_under_lock(word_size);
  size_t grow_by = (word_size * HeapWordSize + ShenandoahHeapRegion::RegionSizeBytes - 1) / ShenandoahHeapRegion::RegionSizeBytes;

  while (result == NULL && _num_regions + grow_by <= _max_regions) {
    grow_heap_by(grow_by);
    result = allocate_memory_under_lock(word_size);
  }

  return result;
}

HeapWord* ShenandoahHeap::allocate_memory(size_t word_size, bool evacuating) {
  HeapWord* result = allocate_memory_work(word_size);

  if (!evacuating) {
    // Allocation failed, try full-GC, then retry allocation.
    //
    // It might happen that one of the threads requesting allocation would unblock
    // way later after full-GC happened, only to fail the second allocation, because
    // other threads have already depleted the free storage. In this case, a better
    // strategy would be to try full-GC again.
    //
    // Lacking a way to detect progress from the "collect" call, we are left with blindly
    // retrying for some bounded number of times.
    // TODO: Poll if Full GC made enough progress to warrant retry.
    int tries = 0;
    while ((result == NULL) && (tries++ < ShenandoahFullGCTries)) {
      log_debug(gc)("[" PTR_FORMAT "] Failed to allocate " SIZE_FORMAT " bytes, doing full GC, try %d",
                    p2i(Thread::current()), word_size * HeapWordSize, tries);
      collect(GCCause::_allocation_failure);
      result = allocate_memory_work(word_size);
    }
  }

  // Only update monitoring counters when not calling from a write-barrier.
  // Otherwise we might attempt to grab the Service_lock, which we must
  // not do when coming from a write-barrier (because the thread might
  // already hold the Compile_lock).
  if (! evacuating) {
    monitoring_support()->update_counters();
  }

  log_develop_trace(gc, alloc)("allocate memory chunk of size " SIZE_FORMAT " at addr " PTR_FORMAT " by thread %d ",
                               word_size, p2i(result), Thread::current()->osthread()->thread_id());

  return result;
}

bool ShenandoahHeap::call_from_write_barrier(bool evacuating) {
  return evacuating && Thread::current()->is_Java_thread();
}

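// Allocates from the current free region, advancing to subsequent free
// regions as they fill up. Requests larger than a region are routed to
// allocate_large_memory().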
HeapWord* ShenandoahHeap::allocate_memory_under_lock(size_t word_size) {
  assert_heaplock_owned_by_current_thread();

  if (word_size * HeapWordSize > ShenandoahHeapRegion::RegionSizeBytes) {
    return allocate_large_memory(word_size);
  }

  // Not enough memory in the free region set.
  // Coming out of full GC, it is possible that no free region is
  // available, so current_index may not be valid.
  if (word_size * HeapWordSize > _free_regions->capacity()) return NULL;

  ShenandoahHeapRegion* my_current_region = _free_regions->current_no_humongous();

  if (my_current_region == NULL) {
    return NULL; // No more room to make a new region. OOM.
  }
  assert(my_current_region != NULL, "should have a region at this point");

#ifdef ASSERT
  if (in_collection_set(my_current_region)) {
    print_heap_regions();
  }
#endif
  assert(! in_collection_set(my_current_region), "never get targeted regions in free-lists");
  assert(! my_current_region->is_humongous(), "never attempt to allocate from humongous object regions");

  HeapWord* result = my_current_region->allocate(word_size);

  while (result == NULL) {
    // 2nd attempt. Try next region.
    _free_regions->increase_used(my_current_region->free());
    ShenandoahHeapRegion* next_region = _free_regions->next_no_humongous();
    assert(next_region != my_current_region, "must not get current again");
    my_current_region = next_region;

    if (my_current_region == NULL) {
      return NULL; // No more room to make a new region. OOM.
    }
    assert(my_current_region != NULL, "should have a region at this point");
    assert(! in_collection_set(my_current_region), "never get targeted regions in free-lists");
    assert(! my_current_region->is_humongous(), "never attempt to allocate from humongous object regions");
    result = my_current_region->allocate(word_size);
  }

  my_current_region->increase_live_data_words(word_size);
  increase_used(word_size * HeapWordSize);
  _free_regions->increase_used(word_size * HeapWordSize);
  return result;
}

HeapWord* ShenandoahHeap::allocate_large_memory(size_t words) {
  assert_heaplock_owned_by_current_thread();

  uint required_regions = ShenandoahHumongous::required_regions(words * HeapWordSize);
  if (required_regions > _max_regions) return NULL;

  ShenandoahHeapRegion* r = _free_regions->allocate_contiguous(required_regions);

  HeapWord* result = NULL;

  if (r != NULL)  {
    result = r->bottom();

    log_debug(gc, humongous)("allocating humongous object of size: " SIZE_FORMAT " KB at location " PTR_FORMAT " in start region " SIZE_FORMAT,
                             (words * HeapWordSize) / K, p2i(result), r->region_number());
  } else {
    log_debug(gc, humongous)("allocating humongous object of size: " SIZE_FORMAT " KB failed",
                             (words * HeapWordSize) / K);
  }

  return result;
}

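// Entry point for ordinary Java allocations. Every object is preceded by a
// Brooks forwarding pointer word, so we allocate one extra word and return
// the address just past it.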
HeapWord* ShenandoahHeap::mem_allocate(size_t size,
                                       bool* gc_overhead_limit_was_exceeded) {
#ifdef ASSERT
  if (ShenandoahVerify && _numAllocs > 1000000) {
    _numAllocs = 0;
  }
  _numAllocs++;
#endif
  HeapWord* filler = allocate_memory(BrooksPointer::word_size() + size, false);
  if (filler != NULL) {
    HeapWord* result = filler + BrooksPointer::word_size();
    BrooksPointer::initialize(oop(result));
    _bytes_allocated_since_cm += size * HeapWordSize;

    assert(! in_collection_set(result), "never allocate in targeted region");
    return result;
  } else {
    /*
    tty->print_cr("Out of memory. Requested number of words: " SIZE_FORMAT " used heap: " INT64_FORMAT ", bytes allocated since last CM: " INT64_FORMAT,
                  size, used(), _bytes_allocated_since_cm);
    {
      print_heap_regions();
      tty->print("Printing " SIZE_FORMAT " free regions:\n", _free_regions->count());
      _free_regions->print();
    }
    */
    return NULL;
  }
}

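// Evacuates a single marked object, unless its forwarding pointer shows that
// some other thread has already evacuated it.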
class ParallelEvacuateRegionObjectClosure : public ObjectClosure {
private:
  ShenandoahHeap* _heap;
  Thread* _thread;
public:
  ParallelEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
    _heap(heap), _thread(Thread::current()) {
  }

  void do_object(oop p) {
    log_develop_trace(gc, compaction)("Calling ParallelEvacuateRegionObjectClosure on " PTR_FORMAT " of size %d",
                                      p2i((HeapWord*) p), p->size());

    assert(_heap->is_marked_complete(p), "expect only marked objects");
    if (oopDesc::unsafe_equals(p, ShenandoahBarrierSet::resolve_oop_static_not_null(p))) {
      _heap->evacuate_object(p, _thread);
    }
  }
};

#ifdef ASSERT
class VerifyEvacuatedObjectClosure : public ObjectClosure {
public:
  void do_object(oop p) {
    if (ShenandoahHeap::heap()->is_marked_complete(p)) {
      oop p_prime = oopDesc::bs()->read_barrier(p);
      assert(! oopDesc::unsafe_equals(p, p_prime), "Should point to evacuated copy");
      if (p->klass() != p_prime->klass()) {
        tty->print_cr("copy has different class than original:");
        p->klass()->print_on(tty);
        p_prime->klass()->print_on(tty);
      }
      assert(p->klass() == p_prime->klass(), "Should have the same class p: " PTR_FORMAT ", p_prime: " PTR_FORMAT, p2i(p), p2i(p_prime));
      //      assert(p->mark() == p_prime->mark(), "Should have the same mark");
      assert(p->size() == p_prime->size(), "Should be the same size");
      assert(oopDesc::unsafe_equals(p_prime, oopDesc::bs()->read_barrier(p_prime)), "One forward once");
    }
  }
};

void ShenandoahHeap::verify_evacuated_region(ShenandoahHeapRegion* from_region) {
  VerifyEvacuatedObjectClosure verify_evacuation;
  marked_object_iterate(from_region, &verify_evacuation);
}
#endif

void ShenandoahHeap::parallel_evacuate_region(ShenandoahHeapRegion* from_region) {
  assert(from_region->has_live(), "all-garbage regions are reclaimed earlier");

  ParallelEvacuateRegionObjectClosure evacuate_region(this);

  marked_object_iterate(from_region, &evacuate_region);

#ifdef ASSERT
  if (ShenandoahVerify && ! cancelled_concgc()) {
    verify_evacuated_region(from_region);
  }
#endif
}

class ParallelEvacuationTask : public AbstractGangTask {
private:
  ShenandoahHeap* _sh;
  ShenandoahCollectionSet* _cs;

public:
  ParallelEvacuationTask(ShenandoahHeap* sh,
                         ShenandoahCollectionSet* cs) :
    AbstractGangTask("Parallel Evacuation Task"),
    _sh(sh),
    _cs(cs) {}

  void work(uint worker_id) {
    ShenandoahHeapRegion* from_hr = _cs->claim_next();

    while (from_hr != NULL) {
      log_develop_trace(gc, region)("Thread " UINT32_FORMAT " claimed Heap Region " SIZE_FORMAT,
                                    worker_id,
                                    from_hr->region_number());

      assert(from_hr->has_live(), "all-garbage regions are reclaimed early");
      _sh->parallel_evacuate_region(from_hr);

      if (_sh->cancelled_concgc()) {
        log_develop_trace(gc, region)("Cancelled concgc while evacuating region " SIZE_FORMAT, from_hr->region_number());
        break;
      }
      from_hr = _cs->claim_next();
    }
  }
};

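// Recycles all regions that are in the collection set: returns their memory
// to the free pool and adjusts the used counters accordingly.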
class RecycleDirtyRegionsClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* _heap;
  size_t _bytes_reclaimed;
public:
  RecycleDirtyRegionsClosure() : _heap(ShenandoahHeap::heap()), _bytes_reclaimed(0) {}

  bool doHeapRegion(ShenandoahHeapRegion* r) {
    assert (! _heap->cancelled_concgc(), "no recycling after cancelled marking");

    if (_heap->in_collection_set(r)) {
      log_develop_trace(gc, region)("Recycling region " SIZE_FORMAT ":", r->region_number());
      _heap->decrease_used(r->used());
      _bytes_reclaimed += r->used();
      r->recycle();
    }

    return false;
  }
  size_t bytes_reclaimed() { return _bytes_reclaimed; }
  void clear_bytes_reclaimed() { _bytes_reclaimed = 0; }
};

void ShenandoahHeap::recycle_dirty_regions() {
  RecycleDirtyRegionsClosure cl;
  cl.clear_bytes_reclaimed();

  heap_region_iterate(&cl);

  _shenandoah_policy->record_bytes_reclaimed(cl.bytes_reclaimed());
  if (! cancelled_concgc()) {
    clear_cset_fast_test();
  }
}

ShenandoahFreeSet* ShenandoahHeap::free_regions() {
  return _free_regions;
}

void ShenandoahHeap::print_heap_regions(outputStream* st) const {
  _ordered_regions->print(st);
}

class PrintAllRefsOopClosure: public ExtendedOopClosure {
private:
  int _index;
  const char* _prefix;

public:
  PrintAllRefsOopClosure(const char* prefix) : _index(0), _prefix(prefix) {}

private:
  template <class T>
  inline void do_oop_work(T* p) {
    oop o = oopDesc::load_decode_heap_oop(p);
    if (o != NULL) {
      if (ShenandoahHeap::heap()->is_in(o) && o->is_oop()) {
        tty->print_cr("%s " INT32_FORMAT " (" PTR_FORMAT ") -> " PTR_FORMAT " (marked: %s) (%s " PTR_FORMAT ")",
                      _prefix, _index,
                      p2i(p), p2i(o),
                      BOOL_TO_STR(ShenandoahHeap::heap()->is_marked_complete(o)),
                      o->klass()->internal_name(), p2i(o->klass()));
      } else {
        tty->print_cr("%s " INT32_FORMAT " (" PTR_FORMAT ") dirty -> " PTR_FORMAT " (not in heap, possibly corrupted or dirty)",
                      _prefix, _index,
                      p2i(p), p2i(o));
      }
    } else {
      tty->print_cr("%s " INT32_FORMAT " (" PTR_FORMAT ") -> " PTR_FORMAT, _prefix, _index, p2i(p), p2i((HeapWord*) o));
    }
    _index++;
  }

public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }

  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

class PrintAllRefsObjectClosure : public ObjectClosure {
  const char* _prefix;

public:
  PrintAllRefsObjectClosure(const char* prefix) : _prefix(prefix) {}

  void do_object(oop p) {
    if (ShenandoahHeap::heap()->is_in(p)) {
      tty->print_cr("%s object " PTR_FORMAT " (marked: %s) (%s " PTR_FORMAT ") refers to:",
                    _prefix, p2i(p),
                    BOOL_TO_STR(ShenandoahHeap::heap()->is_marked_complete(p)),
                    p->klass()->internal_name(), p2i(p->klass()));
      PrintAllRefsOopClosure cl(_prefix);
      p->oop_iterate(&cl);
    }
  }
};

void ShenandoahHeap::print_all_refs(const char* prefix) {
  tty->print_cr("printing all references in the heap");
  tty->print_cr("root references:");

  ensure_parsability(false);

  PrintAllRefsOopClosure cl(prefix);
  roots_iterate(&cl);

  tty->print_cr("heap references:");
  PrintAllRefsObjectClosure cl2(prefix);
  object_iterate(&cl2);
}

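// Verifies post-marking invariants: every reachable oop must be marked,
// must not be forwarded, and must not point into the collection set.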
class VerifyAfterMarkingOopClosure: public ExtendedOopClosure {
private:
  ShenandoahHeap*  _heap;

public:
  VerifyAfterMarkingOopClosure() :
    _heap(ShenandoahHeap::heap()) { }

private:
  template <class T>
  inline void do_oop_work(T* p) {
    oop o = oopDesc::load_decode_heap_oop(p);
    if (o != NULL) {
      if (! _heap->is_marked_complete(o)) {
        _heap->print_heap_regions();
        _heap->print_all_refs("post-mark");
        tty->print_cr("oop not marked, although referrer is marked: " PTR_FORMAT ": in_heap: %s, is_marked: %s",
                      p2i((HeapWord*) o), BOOL_TO_STR(_heap->is_in(o)), BOOL_TO_STR(_heap->is_marked_complete(o)));
        _heap->print_heap_locations((HeapWord*) o, (HeapWord*) o + o->size());

        tty->print_cr("oop class: %s", o->klass()->internal_name());
        if (_heap->is_in(p)) {
          oop referrer = oop(_heap->heap_region_containing(p)->block_start_const(p));
          tty->print_cr("Referrer starts at addr " PTR_FORMAT, p2i((HeapWord*) referrer));
          referrer->print();
          _heap->print_heap_locations((HeapWord*) referrer, (HeapWord*) referrer + referrer->size());
        }
        tty->print_cr("heap region containing object:");
        _heap->heap_region_containing(o)->print();
        tty->print_cr("heap region containing referrer:");
        _heap->heap_region_containing(p)->print();
        tty->print_cr("heap region containing forwardee:");
        _heap->heap_region_containing(oopDesc::bs()->read_barrier(o))->print();
      }
      assert(o->is_oop(), "oop must be an oop");
      assert(Metaspace::contains(o->klass()), "klass pointer must go to metaspace");
      if (! oopDesc::unsafe_equals(o, oopDesc::bs()->read_barrier(o))) {
        tty->print_cr("oop has forwardee: p: " PTR_FORMAT " (%s), o = " PTR_FORMAT " (%s), new-o: " PTR_FORMAT " (%s)",
                      p2i(p),
                      BOOL_TO_STR(_heap->in_collection_set(p)),
                      p2i(o),
                      BOOL_TO_STR(_heap->in_collection_set(o)),
                      p2i((HeapWord*) oopDesc::bs()->read_barrier(o)),
                      BOOL_TO_STR(_heap->in_collection_set(oopDesc::bs()->read_barrier(o))));
        tty->print_cr("oop class: %s", o->klass()->internal_name());
      }
      assert(oopDesc::unsafe_equals(o, oopDesc::bs()->read_barrier(o)), "oops must not be forwarded");
      assert(! _heap->in_collection_set(o), "references must not point to dirty heap regions");
      assert(_heap->is_marked_complete(o), "live oops must be marked current");
    }
  }

public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }

  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

void ShenandoahHeap::verify_heap_after_marking() {
  verify_heap_size_consistency();

  log_trace(gc)("verifying heap after marking");

  VerifyAfterMarkingOopClosure cl;
  roots_iterate(&cl);
  ObjectToOopClosure objs(&cl);
  object_iterate(&objs);
}

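// Reclaims a humongous object wholesale: the start region and all of its
// continuation regions.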
void ShenandoahHeap::reclaim_humongous_region_at(ShenandoahHeapRegion* r) {
  assert(r->is_humongous_start(), "reclaim regions starting with the first one");

  oop humongous_obj = oop(r->bottom() + BrooksPointer::word_size());
  size_t size = humongous_obj->size() + BrooksPointer::word_size();
  uint required_regions = ShenandoahHumongous::required_regions(size * HeapWordSize);
  size_t index = r->region_number();

  assert(!r->has_live(), "liveness must be zero");

  for (size_t i = 0; i < required_regions; i++) {
    ShenandoahHeapRegion* region = _ordered_regions->get(index++);

    assert((region->is_humongous_start() || region->is_humongous_continuation()),
           "expect correct humongous start or continuation");

    if (log_is_enabled(Debug, gc, humongous)) {
      log_debug(gc, humongous)("reclaiming " UINT32_FORMAT " humongous regions for object of size: " SIZE_FORMAT " words", required_regions, size);
      ResourceMark rm;
      outputStream* out = Log(gc, humongous)::debug_stream();
      region->print_on(out);
    }

    region->recycle();
    ShenandoahHeap::heap()->decrease_used(ShenandoahHeapRegion::RegionSizeBytes);
  }
}

class ShenandoahReclaimHumongousRegionsClosure : public ShenandoahHeapRegionClosure {
  bool doHeapRegion(ShenandoahHeapRegion* r) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();

    if (r->is_humongous_start()) {
      oop humongous_obj = oop(r->bottom() + BrooksPointer::word_size());
      if (! heap->is_marked_complete(humongous_obj)) {
        heap->reclaim_humongous_region_at(r);
      }
    }
    return false;
  }
};

#ifdef ASSERT
class CheckCollectionSetClosure: public ShenandoahHeapRegionClosure {
  bool doHeapRegion(ShenandoahHeapRegion* r) {
    assert(! ShenandoahHeap::heap()->in_collection_set(r), "Should have been cleared by now");
    return false;
  }
};
#endif

void ShenandoahHeap::prepare_for_concurrent_evacuation() {
  assert(_ordered_regions->get(0)->region_number() == 0, "FIXME CHF. FIXME CHF!");

  log_develop_trace(gc)("Thread %d started prepare_for_concurrent_evacuation", Thread::current()->osthread()->thread_id());

  if (!cancelled_concgc()) {

    recycle_dirty_regions();

    ensure_parsability(true);

#ifdef ASSERT
    if (ShenandoahVerify) {
      verify_heap_after_marking();
    }
#endif

    // NOTE: This needs to be done during a stop the world pause, because
    // putting regions into the collection set concurrently with Java threads
    // will create a race. In particular, acmp could fail because when we
    // resolve the first operand, the containing region might not yet be in
    // the collection set, and thus return the original oop. When the 2nd
    // operand gets resolved, the region could be in the collection set
    // and the oop gets evacuated. If both operands have originally been
    // the same, we get false negatives.

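    // For example, assume both operands initially refer to the same object X,
    // with the region R containing X being added to the collection set in
    // between the two resolves:
    //
    //   a = resolve(X);   // R not yet in cset: returns from-space X
    //   // R is added to the cset here, X gets evacuated to X'
    //   b = resolve(X);   // R now in cset: returns to-space X'
    //   if (a == b) ...   // compares X with X': a false negative
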
    {
      ShenandoahHeapLock lock(this);
      _collection_set->clear();
      _free_regions->clear();

      ShenandoahReclaimHumongousRegionsClosure reclaim;
      heap_region_iterate(&reclaim);

#ifdef ASSERT
      CheckCollectionSetClosure ccsc;
      _ordered_regions->heap_region_iterate(&ccsc);
#endif

      if (UseShenandoahMatrix) {
        size_t num = num_regions();
        int* connections = NEW_C_HEAP_ARRAY(int, num * num, mtGC);
        calculate_matrix(connections);
        print_matrix(connections);
        _shenandoah_policy->choose_collection_set(_collection_set, connections);
        FREE_C_HEAP_ARRAY(int, connections);
      } else {
        _shenandoah_policy->choose_collection_set(_collection_set);
      }

      _shenandoah_policy->choose_free_set(_free_regions);
    }

    if (UseShenandoahMatrix) {
      _collection_set->print();
    }

    _bytes_allocated_since_cm = 0;

    Universe::update_heap_info_at_gc();
  }
}

class RetireTLABClosure : public ThreadClosure {
private:
  bool _retire;

public:
  RetireTLABClosure(bool retire) : _retire(retire) {
  }

  void do_thread(Thread* thread) {
    thread->gclab().make_parsable(_retire);
  }
};

void ShenandoahHeap::ensure_parsability(bool retire_tlabs) {
  if (UseTLAB) {
    CollectedHeap::ensure_parsability(retire_tlabs);
    RetireTLABClosure cl(retire_tlabs);
    Threads::threads_do(&cl);
  }
}

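// Root closure for evacuation: for each root pointing into the collection
// set, evacuate the object if nobody has yet, and update the root to point
// at the to-space copy.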
class ShenandoahEvacuateUpdateRootsClosure: public ExtendedOopClosure {
private:
  ShenandoahHeap* _heap;
  Thread* _thread;
public:
  ShenandoahEvacuateUpdateRootsClosure() :
    _heap(ShenandoahHeap::heap()), _thread(Thread::current()) {
  }

private:
  template <class T>
  void do_oop_work(T* p) {
    assert(_heap->is_evacuation_in_progress(), "Only do this when evacuation is in progress");

    T o = oopDesc::load_heap_oop(p);
    if (! oopDesc::is_null(o)) {
      oop obj = oopDesc::decode_heap_oop_not_null(o);
      if (_heap->in_collection_set(obj)) {
        assert(_heap->is_marked_complete(obj), "only evacuate marked objects %d %d",
               _heap->is_marked_complete(obj), _heap->is_marked_complete(ShenandoahBarrierSet::resolve_oop_static_not_null(obj)));
        oop resolved = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
        if (oopDesc::unsafe_equals(resolved, obj)) {
          resolved = _heap->evacuate_object(obj, _thread);
        }
        oopDesc::encode_store_heap_oop(p, resolved);
      }
    }
#ifdef ASSERT
    else {
      // tty->print_cr("not updating root at: " PTR_FORMAT " with object: " PTR_FORMAT ", is_in_heap: %s, is_in_cset: %s, is_marked: %s",
      //               p2i(p),
      //               p2i((HeapWord*) obj),
      //               BOOL_TO_STR(_heap->is_in(obj)),
      //               BOOL_TO_STR(_heap->in_cset_fast_test(obj)),
      //               BOOL_TO_STR(_heap->is_marked_complete(obj)));
    }
#endif
  }

public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }
  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

class ShenandoahEvacuateUpdateRootsTask : public AbstractGangTask {
  ShenandoahRootEvacuator* _rp;
public:
  ShenandoahEvacuateUpdateRootsTask(ShenandoahRootEvacuator* rp) :
    AbstractGangTask("Shenandoah evacuate and update roots"),
    _rp(rp)
  {
    // Nothing else to do.
  }

  void work(uint worker_id) {
    ShenandoahEvacuateUpdateRootsClosure cl;
    MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);

    _rp->process_evacuate_roots(&cl, &blobsCl, worker_id);
  }
};

void ShenandoahHeap::evacuate_and_update_roots() {
  COMPILER2_PRESENT(DerivedPointerTable::clear());

#ifdef ASSERT
  if (ShenandoahVerifyReadsToFromSpace) {
    set_from_region_protection(false);
  }
#endif

  assert(SafepointSynchronize::is_at_safepoint(), "Only iterate roots while world is stopped");
  ClassLoaderDataGraph::clear_claimed_marks();

  {
    ShenandoahRootEvacuator rp(this, workers()->active_workers(), ShenandoahCollectorPolicy::evac_thread_roots);
    ShenandoahEvacuateUpdateRootsTask roots_task(&rp);
    workers()->run_task(&roots_task);
  }

#ifdef ASSERT
  if (ShenandoahVerifyReadsToFromSpace) {
    set_from_region_protection(true);
  }
#endif

  COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
}

void ShenandoahHeap::do_evacuation() {
  parallel_evacuate();

  if (ShenandoahVerify && ! cancelled_concgc()) {
    VM_ShenandoahVerifyHeapAfterEvacuation verify_after_evacuation;
    if (Thread::current()->is_VM_thread()) {
      verify_after_evacuation.doit();
    } else {
      VMThread::execute(&verify_after_evacuation);
    }
  }
}

void ShenandoahHeap::parallel_evacuate() {
  log_develop_trace(gc)("starting parallel_evacuate");

  _shenandoah_policy->record_phase_start(ShenandoahCollectorPolicy::conc_evac);

  if (log_is_enabled(Trace, gc, region)) {
    ResourceMark rm;
    outputStream* out = Log(gc, region)::trace_stream();
    out->print("Printing all available regions");
    print_heap_regions(out);
  }

  if (log_is_enabled(Trace, gc, cset)) {
    ResourceMark rm;
    outputStream* out = Log(gc, cset)::trace_stream();
    out->print("Printing collection set which contains " SIZE_FORMAT " regions:\n", _collection_set->count());
    _collection_set->print(out);

    out->print("Printing free set which contains " SIZE_FORMAT " regions:\n", _free_regions->count());
    _free_regions->print(out);
  }

  ParallelEvacuationTask evacuationTask = ParallelEvacuationTask(this, _collection_set);

  workers()->run_task(&evacuationTask);

  if (log_is_enabled(Trace, gc, cset)) {
    ResourceMark rm;
    outputStream* out = Log(gc, cset)::trace_stream();
    out->print("Printing postgc collection set which contains " SIZE_FORMAT " regions:\n",
               _collection_set->count());
    _collection_set->print(out);

    out->print("Printing postgc free regions which contain " SIZE_FORMAT " free regions:\n",
               _free_regions->count());
    _free_regions->print(out);
  }

  if (log_is_enabled(Trace, gc, region)) {
    ResourceMark rm;
    outputStream* out = Log(gc, region)::trace_stream();
    out->print_cr("all regions after evacuation:");
    print_heap_regions(out);
  }

  _shenandoah_policy->record_phase_end(ShenandoahCollectorPolicy::conc_evac);
}

class VerifyEvacuationClosure: public ExtendedOopClosure {
private:
  ShenandoahHeap*  _heap;
  ShenandoahHeapRegion* _from_region;

public:
  VerifyEvacuationClosure(ShenandoahHeapRegion* from_region) :
    _heap(ShenandoahHeap::heap()), _from_region(from_region) { }
private:
  template <class T>
  inline void do_oop_work(T* p) {
    oop heap_oop = oopDesc::load_decode_heap_oop(p);
    if (! oopDesc::is_null(heap_oop)) {
      guarantee(! _from_region->is_in(heap_oop), "no references to from-region allowed after evacuation: " PTR_FORMAT, p2i((HeapWord*) heap_oop));
    }
  }

public:
  void do_oop(oop* p)       {
    do_oop_work(p);
  }

  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

void ShenandoahHeap::roots_iterate(OopClosure* cl) {
  assert(SafepointSynchronize::is_at_safepoint(), "Only iterate roots while world is stopped");

  CodeBlobToOopClosure blobsCl(cl, false);
  CLDToOopClosure cldCl(cl);

  ClassLoaderDataGraph::clear_claimed_marks();

  ShenandoahRootProcessor rp(this, 1);
  rp.process_all_roots(cl, NULL, &cldCl, &blobsCl, 0);
}

void ShenandoahHeap::verify_evacuation(ShenandoahHeapRegion* from_region) {
  VerifyEvacuationClosure rootsCl(from_region);
  roots_iterate(&rootsCl);
}

bool ShenandoahHeap::supports_tlab_allocation() const {
  return true;
}

size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread* thread) const {
  size_t idx = _free_regions->current_index();
  ShenandoahHeapRegion* current = _free_regions->get(idx);
  if (current == NULL) {
    return 0;
  } else if (current->free() > MinTLABSize) {
    // Current region has enough space left, can use it.
    return current->free();
  } else {
    // No more space in current region, we will take next free region
    // on the next TLAB allocation.
    return ShenandoahHeapRegion::RegionSizeBytes;
  }
}

size_t ShenandoahHeap::max_tlab_size() const {
  return ShenandoahHeapRegion::RegionSizeBytes;
}

class ResizeGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    thread->gclab().resize();
  }
};

void ShenandoahHeap::resize_all_tlabs() {
  CollectedHeap::resize_all_tlabs();

  ResizeGCLABClosure cl;
  Threads::threads_do(&cl);
}

class AccumulateStatisticsGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    thread->gclab().accumulate_statistics();
    thread->gclab().initialize_statistics();
  }
};

void ShenandoahHeap::accumulate_statistics_all_gclabs() {
  AccumulateStatisticsGCLABClosure cl;
  Threads::threads_do(&cl);
}

bool ShenandoahHeap::can_elide_tlab_store_barriers() const {
  return true;
}

oop ShenandoahHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) {
  // Overridden to do nothing.
  return new_obj;
}

bool ShenandoahHeap::can_elide_initializing_store_barrier(oop new_obj) {
  return true;
}

bool ShenandoahHeap::card_mark_must_follow_store() const {
  return false;
}

1469 void ShenandoahHeap::collect(GCCause::Cause cause) {
1470   assert(cause != GCCause::_gc_locker, "no JNI critical callback");
1471   if (GCCause::is_user_requested_gc(cause)) {
1472     if (! DisableExplicitGC) {
1473       _concurrent_gc_thread->do_full_gc(cause);
1474     }
1475   } else if (cause == GCCause::_allocation_failure) {
1476     collector_policy()->set_should_clear_all_soft_refs(true);
1477     _concurrent_gc_thread->do_full_gc(cause);
1478   }
1479 }
1480 
1481 void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {
1482   // Full GCs are driven by the concurrent GC thread (see collect()); nothing to do here.
1483 }
1484 
1485 AdaptiveSizePolicy* ShenandoahHeap::size_policy() {
1486   Unimplemented();
1487   return NULL;
1489 }
1490 
1491 CollectorPolicy* ShenandoahHeap::collector_policy() const {
1492   return _shenandoah_policy;
1493 }
1494 
1495 
1496 HeapWord* ShenandoahHeap::block_start(const void* addr) const {
1497   Space* sp = heap_region_containing(addr);
1498   if (sp != NULL) {
1499     return sp->block_start(addr);
1500   }
1501   return NULL;
1502 }
1503 
1504 size_t ShenandoahHeap::block_size(const HeapWord* addr) const {
1505   Space* sp = heap_region_containing(addr);
1506   assert(sp != NULL, "block_size of address outside of heap");
1507   return sp->block_size(addr);
1508 }
1509 
1510 bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
1511   Space* sp = heap_region_containing(addr);
1512   return sp->block_is_obj(addr);
1513 }
1514 
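     // Not tracked yet; returning 0 means "a GC has just happened".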
1515 jlong ShenandoahHeap::millis_since_last_gc() {
1516   return 0;
1517 }
1518 
1519 void ShenandoahHeap::prepare_for_verify() {
1520   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
1521     ensure_parsability(false);
1522   }
1523 }
1524 
1525 void ShenandoahHeap::print_gc_threads_on(outputStream* st) const {
1526   workers()->print_worker_threads_on(st);
1527 }
1528 
1529 void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
1530   workers()->threads_do(tcl);
1531 }
1532 
1533 void ShenandoahHeap::print_tracing_info() const {
1534   if (log_is_enabled(Info, gc, stats)) {
1535     ResourceMark rm;
1536     outputStream* out = Log(gc, stats)::info_stream();
1537     _shenandoah_policy->print_tracing_info(out);
1538   }
1539 }
1540 
1541 class ShenandoahVerifyRootsClosure: public ExtendedOopClosure {
1542 private:
1543   ShenandoahHeap*  _heap;
1544   VerifyOption     _vo;
1545   bool             _failures;
1546 public:
1547   // _vo == UsePrevMarking -> use "prev" marking information,
1548   // _vo == UseNextMarking -> use "next" marking information,
1549   // _vo == UseMarkWord    -> use mark word from object header.
1550   ShenandoahVerifyRootsClosure(VerifyOption vo) :
1551     _heap(ShenandoahHeap::heap()),
1552     _vo(vo),
1553     _failures(false) { }
1554 
1555   bool failures() { return _failures; }
1556 
1557 private:
1558   template <class T>
1559   inline void do_oop_work(T* p) {
1560     oop obj = oopDesc::load_decode_heap_oop(p);
1561     if (! oopDesc::is_null(obj) && ! obj->is_oop()) {
1562       _failures = true;
1563       // Just for debugging.
1564       tty->print_cr("Root location "PTR_FORMAT" contains broken oop "PTR_FORMAT,
1565                     p2i(p), p2i((void*) obj));
1566       // obj->print_on(tty);
1567     }
1568     guarantee(obj->is_oop_or_null(), "is oop or null");
1569   }
1570 
1571 public:
1572   void do_oop(oop* p)       {
1573     do_oop_work(p);
1574   }
1575 
1576   void do_oop(narrowOop* p) {
1577     do_oop_work(p);
1578   }
1579 
1580 };
1581 
1582 class ShenandoahVerifyHeapClosure: public ObjectClosure {
1583 private:
1584   ShenandoahVerifyRootsClosure _rootsCl;
1585 public:
1586   ShenandoahVerifyHeapClosure(ShenandoahVerifyRootsClosure rc) :
1587     _rootsCl(rc) {}
1588 
1589   void do_object(oop p) {
1590     _rootsCl.do_oop(&p);
1591   }
1592 };
1593 
1594 class ShenandoahVerifyKlassClosure: public KlassClosure {
1595   OopClosure *_oop_closure;
1596  public:
1597   ShenandoahVerifyKlassClosure(OopClosure* cl) : _oop_closure(cl) {}
1598   void do_klass(Klass* k) {
1599     k->oops_do(_oop_closure);
1600   }
1601 };
1602 
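     // Verify the roots and then all marked objects in the heap. Only done at
     // a safepoint (or with TLABs disabled), when the heap is parsable.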
1603 void ShenandoahHeap::verify(VerifyOption vo) {
1604   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
1605 
1606     ShenandoahVerifyRootsClosure rootsCl(vo);
1607 
1608     assert(Thread::current()->is_VM_thread(),
1609            "Expected to be executed serially by the VM thread at this point");
1610 
1611     roots_iterate(&rootsCl);
1612 
1613     bool failures = rootsCl.failures();
1614     log_trace(gc)("verify failures: %s", BOOL_TO_STR(failures));
1615 
1616     ShenandoahVerifyHeapClosure heapCl(rootsCl);
1617 
1618     object_iterate(&heapCl);
1619     // TODO: Implement rest of it.
1620   } else {
1621     tty->print("(SKIPPING roots, heapRegions, remset) ");
1622   }
1623 }

1624 size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
1625   return _free_regions->capacity();
1626 }
1627 
1628 class ShenandoahIterateObjectClosureRegionClosure: public ShenandoahHeapRegionClosure {
1629   ObjectClosure* _cl;
1630 public:
1631   ShenandoahIterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {}
1632   bool doHeapRegion(ShenandoahHeapRegion* r) {
1633     ShenandoahHeap::heap()->marked_object_iterate(r, _cl);
1634     return false;
1635   }
1636 };
1637 
1638 void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
1639   ShenandoahIterateObjectClosureRegionClosure blk(cl);
1640   heap_region_iterate(&blk, /* skip_dirty_regions = */ false, /* skip_humongous_continuation = */ true);
1641 }
1642 
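     // Updates all oop fields of an object through the Brooks forwarding
     // pointer, so that safe iteration only ever exposes to-space references.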
1643 class ShenandoahSafeObjectIterateAdjustPtrsClosure : public MetadataAwareOopClosure {
1644 private:
1645   ShenandoahHeap* _heap;
1646 
1647 public:
1648   ShenandoahSafeObjectIterateAdjustPtrsClosure() : _heap(ShenandoahHeap::heap()) {}
1649 
1650 private:
1651   template <class T>
1652   inline void do_oop_work(T* p) {
1653     T o = oopDesc::load_heap_oop(p);
1654     if (!oopDesc::is_null(o)) {
1655       oop obj = oopDesc::decode_heap_oop_not_null(o);
1656       oopDesc::encode_store_heap_oop(p, BrooksPointer::forwardee(obj));
1657     }
1658   }
1659 public:
1660   void do_oop(oop* p) {
1661     do_oop_work(p);
1662   }
1663   void do_oop(narrowOop* p) {
1664     do_oop_work(p);
1665   }
1666 };
1667 
1668 class ShenandoahSafeObjectIterateAndUpdate : public ObjectClosure {
1669 private:
1670   ObjectClosure* _cl;
1671 public:
1672   ShenandoahSafeObjectIterateAndUpdate(ObjectClosure *cl) : _cl(cl) {}
1673 
1674   virtual void do_object(oop obj) {
1675     assert (oopDesc::unsafe_equals(obj, BrooksPointer::forwardee(obj)),
1676             "avoid double-counting: only non-forwarded objects here");
1677 
1678     // Fix up the ptrs.
1679     ShenandoahSafeObjectIterateAdjustPtrsClosure adjust_ptrs;
1680     obj->oop_iterate(&adjust_ptrs);
1681 
1682     // Now hand the object to the wrapped closure:
1683     _cl->do_object(obj);
1684   }
1685 };
1686 
1687 void ShenandoahHeap::safe_object_iterate(ObjectClosure* cl) {
1688   assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1689 
1690   // Safe iteration visits only objects whose references are correct.
1691   // This is why we skip dirty regions, which hold stale copies of objects,
1692   // and fix up the pointers in the objects we do return.
1693 
1694   ShenandoahSafeObjectIterateAndUpdate safe_cl(cl);
1695   ShenandoahIterateObjectClosureRegionClosure blk(&safe_cl);
1696   heap_region_iterate(&blk,
1697                       /* skip_dirty_regions = */ true,
1698                       /* skip_humongous_continuation = */ true);
1699 
1700   _need_update_refs = false; // already updated the references
1701 }
1702 
1703 // Apply blk->doHeapRegion() on all committed regions in address order,
1704 // terminating the iteration early if doHeapRegion() returns true.
1705 void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk, bool skip_dirty_regions, bool skip_humongous_continuation) const {
1706   for (size_t i = 0; i < _num_regions; i++) {
1707     ShenandoahHeapRegion* current  = _ordered_regions->get(i);
1708     if (skip_humongous_continuation && current->is_humongous_continuation()) {
1709       continue;
1710     }
1711     if (skip_dirty_regions && in_collection_set(current)) {
1712       continue;
1713     }
1714     if (blk->doHeapRegion(current)) {
1715       return;
1716     }
1717   }
1718 }
1719 
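     // Resets per-region liveness counters and next-TAMS in preparation for
     // the next marking cycle.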
1720 class ClearLivenessClosure : public ShenandoahHeapRegionClosure {
1721   ShenandoahHeap* sh;
1722 public:
1723   ClearLivenessClosure(ShenandoahHeap* heap) : sh(heap) { }
1724 
1725   bool doHeapRegion(ShenandoahHeapRegion* r) {
1726     r->clear_live_data();
1727     sh->set_next_top_at_mark_start(r->bottom(), r->top());
1728     return false;
1729   }
1730 };
1731 
1732 
1733 void ShenandoahHeap::start_concurrent_marking() {
1734 
1735   shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::accumulate_stats);
1736   accumulate_statistics_all_tlabs();
1737   shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::accumulate_stats);
1738 
1739   set_concurrent_mark_in_progress(true);
1740   // We need to reset all TLABs because we'd lose marks on all objects allocated in them.
1741   if (UseTLAB) {
1742     shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::make_parsable);
1743     ensure_parsability(true);
1744     shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::make_parsable);
1745   }
1746 
1747   _shenandoah_policy->record_bytes_allocated(_bytes_allocated_since_cm);
1748   _used_start_gc = used();
1749 
1750 #ifdef ASSERT
1751   if (ShenandoahDumpHeapBeforeConcurrentMark) {
1752     ensure_parsability(false);
1753     print_all_refs("pre-mark");
1754   }
1755 #endif
1756 
1757   shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::clear_liveness);
1758   ClearLivenessClosure clc(this);
1759   heap_region_iterate(&clc);
1760   shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::clear_liveness);
1761 
1762   // print_all_refs("pre -mark");
1763 
1764   // oopDesc::_debug = true;
1765 
1766   // Make above changes visible to worker threads
1767   OrderAccess::fence();
1768 
1769   shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::scan_roots);
1770   concurrentMark()->init_mark_roots();
1771   shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::scan_roots);
1772 
1773   //  print_all_refs("pre-mark2");
1774 }
1775 
1776 class VerifyAfterEvacuationClosure : public ExtendedOopClosure {
1777 
1778   ShenandoahHeap* _sh;
1779 
1780 public:
1781   VerifyAfterEvacuationClosure() : _sh ( ShenandoahHeap::heap() ) {}
1782 
1783   template<class T> void do_oop_nv(T* p) {
1784     T heap_oop = oopDesc::load_heap_oop(p);
1785     if (!oopDesc::is_null(heap_oop)) {
1786       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
1787       guarantee(_sh->in_collection_set(obj) == (! oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj))),
1788                 "forwarded objects can only exist in dirty (from-space) regions; is_dirty: %s, is_forwarded: %s, obj-klass: %s, marked: %s",
1789                 BOOL_TO_STR(_sh->in_collection_set(obj)),
1790                 BOOL_TO_STR(! oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj))),
1791                 obj->klass()->external_name(),
1792                 BOOL_TO_STR(_sh->is_marked_complete(obj))
1793                 );
1794       obj = oopDesc::bs()->read_barrier(obj);
1795       guarantee(! _sh->in_collection_set(obj), "forwarded oops must not point to dirty regions");
1796       guarantee(obj->is_oop(), "is_oop");
1797       guarantee(Metaspace::contains(obj->klass()), "klass pointer must go to metaspace");
1798     }
1799   }
1800 
1801   void do_oop(oop* p)       { do_oop_nv(p); }
1802   void do_oop(narrowOop* p) { do_oop_nv(p); }
1803 
1804 };
1805 
1806 void ShenandoahHeap::verify_heap_after_evacuation() {
1807 
1808   verify_heap_size_consistency();
1809 
1810   ensure_parsability(false);
1811 
1812   VerifyAfterEvacuationClosure cl;
1813   roots_iterate(&cl);
1814 
1815   ObjectToOopClosure objs(&cl);
1816   object_iterate(&objs);
1817 
1818 }
1819 
1820 class VerifyRegionsAfterUpdateRefsClosure : public ShenandoahHeapRegionClosure {
1821 public:
1822   bool doHeapRegion(ShenandoahHeapRegion* r) {
1823     assert(! ShenandoahHeap::heap()->in_collection_set(r), "no region must be in collection set");
1824     return false;
1825   }
1826 };
1827 
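     // After marking completes, the 'next' marking data becomes the 'complete'
     // data; swapping pointers avoids copying bitmaps and TAMS tables.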
1828 void ShenandoahHeap::swap_mark_bitmaps() {
1829   // Swap bitmaps.
1830   CMBitMap* tmp1 = _complete_mark_bit_map;
1831   _complete_mark_bit_map = _next_mark_bit_map;
1832   _next_mark_bit_map = tmp1;
1833 
1834   // Swap top-at-mark-start pointers
1835   HeapWord** tmp2 = _complete_top_at_mark_starts;
1836   _complete_top_at_mark_starts = _next_top_at_mark_starts;
1837   _next_top_at_mark_starts = tmp2;
1838 
1839   HeapWord** tmp3 = _complete_top_at_mark_starts_base;
1840   _complete_top_at_mark_starts_base = _next_top_at_mark_starts_base;
1841   _next_top_at_mark_starts_base = tmp3;
1842 }
1843 
1844 void ShenandoahHeap::stop_concurrent_marking() {
1845   assert(concurrent_mark_in_progress(), "How else could we get here?");
1846   if (! cancelled_concgc()) {
1847     // Marking finished without cancellation: no references need updating at
1848     // this point, and the freshly completed 'next' marking becomes 'complete'.
1849     set_need_update_refs(false);
1850     swap_mark_bitmaps();
1851   }
1852   set_concurrent_mark_in_progress(false);
1853 
1854   if (log_is_enabled(Trace, gc, region)) {
1855     ResourceMark rm;
1856     outputStream* out = Log(gc, region)::trace_stream();
1857     print_heap_regions(out);
1858   }
1859 
1860 }
1861 
1862 void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
1863   _concurrent_mark_in_progress = in_progress ? 1 : 0;
1864   JavaThread::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
1865 }
1866 
1867 void ShenandoahHeap::set_evacuation_in_progress_concurrently(bool in_progress) {
1868   // Note: it is important to first release the _evacuation_in_progress flag here,
1869   // so that Java threads can get out of oom_during_evacuation() and reach a safepoint,
1870   // in case a VM task is pending.
1871   set_evacuation_in_progress(in_progress);
1872   MutexLocker mu(Threads_lock);
1873   JavaThread::set_evacuation_in_progress_all_threads(in_progress);
1874 }
1875 
1876 void ShenandoahHeap::set_evacuation_in_progress_at_safepoint(bool in_progress) {
1877   assert(SafepointSynchronize::is_at_safepoint(), "Only call this at safepoint");
1878   set_evacuation_in_progress(in_progress);
1879   JavaThread::set_evacuation_in_progress_all_threads(in_progress);
1880 }
1881 
1882 void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
1883   _evacuation_in_progress = in_progress ? 1 : 0;
1884   OrderAccess::fence();
1885 }
1886 
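     // Debug helper: check that from-space object p has been forwarded to its
     // to-space copy c exactly once, and that the copy matches the original.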
1887 void ShenandoahHeap::verify_copy(oop p, oop c) {
1888   assert(! oopDesc::unsafe_equals(p, oopDesc::bs()->read_barrier(p)), "forwarded correctly");
1889   assert(oopDesc::unsafe_equals(oopDesc::bs()->read_barrier(p), c), "verify pointer is correct");
1890   if (p->klass() != c->klass()) {
1891     print_heap_regions();
1892   }
1893   assert(p->klass() == c->klass(), "verify class p-size: "INT32_FORMAT" c-size: "INT32_FORMAT, p->size(), c->size());
1894   assert(p->size() == c->size(), "verify size");
1895   // Object may have been locked between copy and verification
1896   //   assert(p->mark() == c->mark(), "verify mark");
1897   assert(oopDesc::unsafe_equals(c, oopDesc::bs()->read_barrier(c)), "verify only forwarded once");
1898 }
1899 
1900 void ShenandoahHeap::oom_during_evacuation() {
1901   log_develop_trace(gc)("Out of memory during evacuation; cancelling evacuation and scheduling a full GC, thread %d",
1902                         Thread::current()->osthread()->thread_id());
1903 
1904   // We ran out of memory during evacuation. Cancel evacuation, and schedule a full-GC.
1905   collector_policy()->set_should_clear_all_soft_refs(true);
1906   concurrent_thread()->try_set_full_gc();
1907   cancel_concgc(_oom_evacuation);
1908 
1909   if ((! Thread::current()->is_GC_task_thread()) && (! Thread::current()->is_ConcurrentGC_thread())) {
1910     assert(! Threads_lock->owned_by_self(), "must not hold Threads_lock here");
1911     log_warning(gc)("OOM during evacuation. Making Java thread wait until evacuation finishes.");
1912     while (_evacuation_in_progress) { // wait.
1913       Thread::current()->_ParkEvent->park(1);
1914     }
1915   }
1916 
1917 }
1918 
1919 HeapWord* ShenandoahHeap::tlab_post_allocation_setup(HeapWord* obj) {
1920   // Initialize Brooks pointer for the next object
1921   HeapWord* result = obj + BrooksPointer::word_size();
1922   BrooksPointer::initialize(oop(result));
1923   return result;
1924 }
1925 
1926 uint ShenandoahHeap::oop_extra_words() {
1927   return BrooksPointer::word_size();
1928 }
1929 
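     // Commits storage for num_regions additional regions and adds them to the
     // ordered region set and the free set.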
1930 void ShenandoahHeap::grow_heap_by(size_t num_regions) {
1931   size_t base = _num_regions;
1932   ensure_new_regions(num_regions);
1933   for (size_t i = 0; i < num_regions; i++) {
1934     size_t new_region_index = i + base;
1935     HeapWord* start = _first_region_bottom + (ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize) * new_region_index;
1936     ShenandoahHeapRegion* new_region = new ShenandoahHeapRegion(this, start, ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize, new_region_index);
1937 
1938     if (log_is_enabled(Trace, gc, region)) {
1939       ResourceMark rm;
1940       outputStream* out = Log(gc, region)::trace_stream();
1941       out->print_cr("allocating new region at index: "SIZE_FORMAT, new_region_index);
1942       new_region->print_on(out);
1943     }
1944 
1945     assert(_ordered_regions->active_regions() == new_region->region_number(), "must match");
1946     _ordered_regions->add_region(new_region);
1947     _in_cset_fast_test_base[new_region_index] = false; // Not in cset
1948     _next_top_at_mark_starts_base[new_region_index] = new_region->bottom();
1949     _complete_top_at_mark_starts_base[new_region_index] = new_region->bottom();
1950 
1951     _free_regions->add_region(new_region);
1952   }
1953 }
1954 
1955 void ShenandoahHeap::ensure_new_regions(size_t new_regions) {
1956 
1957   size_t num_regions = _num_regions;
1958   size_t new_num_regions = num_regions + new_regions;
1959   assert(new_num_regions <= _max_regions, "we checked this earlier");
1960 
1961   size_t expand_size = new_regions * ShenandoahHeapRegion::RegionSizeBytes;
1962   log_trace(gc, region)("expanding storage by "SIZE_FORMAT_HEX" bytes, for "SIZE_FORMAT" new regions", expand_size, new_regions);
1963   bool success = _storage.expand_by(expand_size, ShenandoahAlwaysPreTouch);
1964   assert(success, "should always be able to expand by requested size");
1965 
1966   _num_regions = new_num_regions;
1967 
1968 }
1969 
1970 ShenandoahForwardedIsAliveClosure::ShenandoahForwardedIsAliveClosure() :
1971   _heap(ShenandoahHeap::heap_no_check()) {
1972 }
1973 
1974 void ShenandoahForwardedIsAliveClosure::init(ShenandoahHeap* heap) {
1975   _heap = heap;
1976 }
1977 
1978 bool ShenandoahForwardedIsAliveClosure::do_object_b(oop obj) {
1979 
1980   assert(_heap != NULL, "sanity");
1981   obj = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
1982 #ifdef ASSERT
1983   if (_heap->concurrent_mark_in_progress()) {
1984     assert(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)), "only query to-space");
1985   }
1986 #endif
1987   assert(! oopDesc::is_null(obj), "expected non-null object");
1988   return _heap->is_marked_next(obj);
1989 }
1990 
1991 void ShenandoahHeap::ref_processing_init() {
1992   MemRegion mr = reserved_region();
1993 
1994   isAlive.init(ShenandoahHeap::heap());
1995   assert(_max_workers > 0, "Sanity");
1996 
1997   _ref_processor =
1998     new ReferenceProcessor(mr,                      // span
1999                            ParallelRefProcEnabled,  // mt processing
2001                            _max_workers,            // degree of mt processing
2003                            true,                    // mt discovery
2005                            _max_workers,            // degree of mt discovery
2007                            false,                   // reference discovery is not atomic
2009                            &isAlive);               // is-alive closure
2010 }
2011 
2012 #ifdef ASSERT
2013 void ShenandoahHeap::set_from_region_protection(bool protect) {
2014   for (size_t i = 0; i < _num_regions; i++) {
2015     ShenandoahHeapRegion* region = _ordered_regions->get(i);
2016     if (region != NULL && in_collection_set(region)) {
2017       if (protect) {
2018         region->memProtectionOn();
2019       } else {
2020         region->memProtectionOff();
2021       }
2022     }
2023   }
2024 }
2025 #endif
2026 
2027 size_t ShenandoahHeap::num_regions() {
2028   return _num_regions;
2029 }
2030 
2031 size_t ShenandoahHeap::max_regions() {
2032   return _max_regions;
2033 }
2034 
2035 GCTracer* ShenandoahHeap::tracer() {
2036   return shenandoahPolicy()->tracer();
2037 }
2038 
2039 size_t ShenandoahHeap::tlab_used(Thread* thread) const {
2040   return _free_regions->used();
2041 }
2042 
2043 void ShenandoahHeap::cancel_concgc(GCCause::Cause cause) {
2044   if (try_cancel_concgc()) {
2045     log_info(gc)("Cancelling concurrent GC: %s", GCCause::to_string(cause));
2046     _shenandoah_policy->report_concgc_cancelled();
2047   }
2048 }
2049 
2050 void ShenandoahHeap::cancel_concgc(ShenandoahCancelCause cause) {
2051   if (try_cancel_concgc()) {
2052     log_info(gc)("Cancelling concurrent GC: %s", cancel_cause_to_string(cause));
2053     _shenandoah_policy->report_concgc_cancelled();
2054   }
2055 }
2056 
2057 const char* ShenandoahHeap::cancel_cause_to_string(ShenandoahCancelCause cause) {
2058   switch (cause) {
2059     case _oom_evacuation:
2060       return "Out of memory for evacuation";
2061     case _vm_stop:
2062       return "Stopping VM";
2063     default:
2064       return "Unknown";
2065   }
2066 }
2067 
2068 void ShenandoahHeap::clear_cancelled_concgc() {
2069   set_cancelled_concgc(false);
2070 }
2071 
2072 uint ShenandoahHeap::max_workers() {
2073   return _max_workers;
2074 }
2075 
2076 void ShenandoahHeap::stop() {
2077   // The shutdown sequence should be able to complete even when a GC cycle is running.
2078 
2079   // Step 1. Notify control thread that we are in shutdown.
2080   // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown.
2081   // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
2082   _concurrent_gc_thread->prepare_for_graceful_shutdown();
2083 
2084   // Step 2. Notify GC workers that we are cancelling GC.
2085   cancel_concgc(_vm_stop);
2086 
2087   // Step 3. Wait until GC worker exits normally.
2088   _concurrent_gc_thread->stop();
2089 }
2090 
2091 void ShenandoahHeap::unlink_string_and_symbol_table(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols) {
2092 
2093   StringSymbolTableUnlinkTask shenandoah_unlink_task(is_alive, process_strings, process_symbols);
2094   workers()->run_task(&shenandoah_unlink_task);
2095 
2096   //  if (G1StringDedup::is_enabled()) {
2097   //    G1StringDedup::unlink(is_alive);
2098   //  }
2099 }
2100 
2101 void ShenandoahHeap::set_need_update_refs(bool need_update_refs) {
2102   _need_update_refs = need_update_refs;
2103 }
2104 
2105 // FIXME: This should live in ShenandoahHeapRegionSet.
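     // Returns the next non-humongous region after r; compaction skips over
     // humongous regions.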
2106 ShenandoahHeapRegion* ShenandoahHeap::next_compaction_region(const ShenandoahHeapRegion* r) {
2107   size_t region_idx = r->region_number() + 1;
2108   ShenandoahHeapRegion* next = _ordered_regions->get(region_idx);
2109   guarantee(next->region_number() == region_idx, "region number must match");
2110   while (next->is_humongous()) {
2111     region_idx = next->region_number() + 1;
2112     next = _ordered_regions->get(region_idx);
2113     guarantee(next->region_number() == region_idx, "region number must match");
2114   }
2115   return next;
2116 }
2117 
2118 void ShenandoahHeap::set_region_in_collection_set(size_t region_index, bool b) {
2119   _in_cset_fast_test_base[region_index] = b;
2120 }
2121 
2122 ShenandoahMonitoringSupport* ShenandoahHeap::monitoring_support() {
2123   return _monitoring_support;
2124 }
2125 
2126 CMBitMap* ShenandoahHeap::complete_mark_bit_map() {
2127   return _complete_mark_bit_map;
2128 }
2129 
2130 CMBitMap* ShenandoahHeap::next_mark_bit_map() {
2131   return _next_mark_bit_map;
2132 }
2133 
2134 void ShenandoahHeap::add_free_region(ShenandoahHeapRegion* r) {
2135   _free_regions->add_region(r);
2136 }
2137 
2138 void ShenandoahHeap::clear_free_regions() {
2139   _free_regions->clear();
2140 }
2141 
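     // Raw addresses of GC state flags, presumably consumed by generated
     // barrier code that tests them directly.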
2142 address ShenandoahHeap::in_cset_fast_test_addr() {
2143   return (address) (ShenandoahHeap::heap()->_in_cset_fast_test);
2144 }
2145 
2146 address ShenandoahHeap::cancelled_concgc_addr() {
2147   return (address) &(ShenandoahHeap::heap()->_cancelled_concgc);
2148 }
2149 
2150 void ShenandoahHeap::clear_cset_fast_test() {
2151   assert(_in_cset_fast_test_base != NULL, "sanity");
2152   memset(_in_cset_fast_test_base, false,
2153          _in_cset_fast_test_length * sizeof(bool));
2154 }
2155 
2156 size_t ShenandoahHeap::conservative_max_heap_alignment() {
2157   return ShenandoahMaxRegionSize;
2158 }
2159 
2160 size_t ShenandoahHeap::bytes_allocated_since_cm() {
2161   return _bytes_allocated_since_cm;
2162 }
2163 
2164 void ShenandoahHeap::set_bytes_allocated_since_cm(size_t bytes) {
2165   _bytes_allocated_since_cm = bytes;
2166 }
2167 
2168 size_t ShenandoahHeap::max_allocated_gc() {
2169   return _max_allocated_gc;
2170 }
2171 
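     // Top-at-mark-start (TAMS) support. The tables are indexed by the region
     // base address shifted by the region size; objects allocated above TAMS
     // after marking started are treated as implicitly live.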
2172 void ShenandoahHeap::set_next_top_at_mark_start(HeapWord* region_base, HeapWord* addr) {
2173   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::RegionSizeShift;
2174   _next_top_at_mark_starts[index] = addr;
2175 }
2176 
2177 HeapWord* ShenandoahHeap::next_top_at_mark_start(HeapWord* region_base) {
2178   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::RegionSizeShift;
2179   return _next_top_at_mark_starts[index];
2180 }
2181 
2182 void ShenandoahHeap::set_complete_top_at_mark_start(HeapWord* region_base, HeapWord* addr) {
2183   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::RegionSizeShift;
2184   _complete_top_at_mark_starts[index] = addr;
2185 }
2186 
2187 HeapWord* ShenandoahHeap::complete_top_at_mark_start(HeapWord* region_base) {
2188   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::RegionSizeShift;
2189   return _complete_top_at_mark_starts[index];
2190 }
2191 
2192 void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
2193   _full_gc_in_progress = in_progress;
2194 }
2195 
2196 bool ShenandoahHeap::is_full_gc_in_progress() const {
2197   return _full_gc_in_progress;
2198 }
2199 
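     // Ensures the oops embedded in an nmethod point to to-space copies (via
     // the write barrier) when the nmethod is registered.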
2200 class NMethodOopInitializer : public OopClosure {
2201 private:
2202   ShenandoahHeap* _heap;
2203 public:
2204   NMethodOopInitializer() : _heap(ShenandoahHeap::heap()) {
2205   }
2206 
2207 private:
2208   template <class T>
2209   inline void do_oop_work(T* p) {
2210     T o = oopDesc::load_heap_oop(p);
2211     if (! oopDesc::is_null(o)) {
2212       oop obj1 = oopDesc::decode_heap_oop_not_null(o);
2213       oop obj2 = oopDesc::bs()->write_barrier(obj1);
2214       if (! oopDesc::unsafe_equals(obj1, obj2)) {
2215         oopDesc::encode_store_heap_oop(p, obj2);
2216       }
2217     }
2218   }
2219 
2220 public:
2221   void do_oop(oop* o) {
2222     do_oop_work(o);
2223   }
2224   void do_oop(narrowOop* o) {
2225     do_oop_work(o);
2226   }
2227 };
2228 
2229 void ShenandoahHeap::register_nmethod(nmethod* nm) {
2230   NMethodOopInitializer init;
2231   nm->oops_do(&init);
2232   nm->fix_oop_relocations();
2233 }
2234 
2235 void ShenandoahHeap::unregister_nmethod(nmethod* nm) {
2236 }
2237 
2238 void ShenandoahHeap::pin_object(oop o) {
2239   heap_region_containing(o)->pin();
2240 }
2241 
2242 void ShenandoahHeap::unpin_object(oop o) {
2243   heap_region_containing(o)->unpin();
2244 }
2245 
2246 
2247 GCTimer* ShenandoahHeap::gc_timer() const {
2248   return _gc_timer;
2249 }
2250 
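     // Debugging support: the closures below build a num_regions x num_regions
     // connection matrix that counts references from each region into every
     // other region; see calculate_matrix() and print_matrix().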
2251 class RecordAllRefsOopClosure: public ExtendedOopClosure {
2252 private:
2253   int _x;
2254   int *_matrix;
2255   int _num_regions;
2256   oop _p;
2257 
2258 public:
2259   RecordAllRefsOopClosure(int *matrix, int x, size_t num_regions, oop p) :
2260     _x(x), _matrix(matrix), _num_regions((int) num_regions), _p(p) {}
2261 
2262   template <class T>
2263   void do_oop_work(T* p) {
2264     oop o = oopDesc::load_decode_heap_oop(p);
2265     if (o != NULL) {
2266       if (ShenandoahHeap::heap()->is_in(o) && o->is_oop() ) {
2267         int y = ShenandoahHeap::heap()->heap_region_containing(o)->region_number();
2268         _matrix[_x * _num_regions + y]++;
2269       }
2270     }
2271   }
2272   void do_oop(oop* p) {
2273     do_oop_work(p);
2274   }
2275 
2276   void do_oop(narrowOop* p) {
2277     do_oop_work(p);
2278   }
2279 
2280 };
2281 
2282 class RecordAllRefsObjectClosure : public ObjectClosure {
2283   int *_matrix;
2284   size_t _num_regions;
2285 
2286 public:
2287   RecordAllRefsObjectClosure(int *matrix, size_t num_regions) :
2288     _matrix(matrix), _num_regions(num_regions) {}
2289 
2290   void do_object(oop p) {
2291     if (ShenandoahHeap::heap()->is_in(p) && ShenandoahHeap::heap()->is_marked_next(p)  && p->is_oop()) {
2292       int x = ShenandoahHeap::heap()->heap_region_containing(p)->region_number();
2293       RecordAllRefsOopClosure cl(_matrix, x, _num_regions, p);
2294       p->oop_iterate(&cl);
2295     }
2296   }
2297 };

2298 void ShenandoahHeap::calculate_matrix(int* connections) {
2299   log_develop_trace(gc)("calculating matrix");
2300   ensure_parsability(false);
2301   int num = (int) num_regions();
2302 
2303   for (int i = 0; i < num; i++) {
2304     for (int j = 0; j < num; j++) {
2305       connections[i * num + j] = 0;
2306     }
2307   }
2308 
2309   RecordAllRefsOopClosure cl(connections, 0, num, NULL);
2310   roots_iterate(&cl);
2311 
2312   RecordAllRefsObjectClosure cl2(connections, num);
2313   object_iterate(&cl2);
2314 
2315 }
2316 
2317 void ShenandoahHeap::print_matrix(int* connections) {
2318   int num = num_regions();
2319   int cs_regions = 0;
2320   int referenced = 0;
2321 
2322   for (int i = 0; i < num; i++) {
2323     size_t liveData = ShenandoahHeap::heap()->regions()->get(i)->get_live_data_bytes();
2324 
2325     int numReferencedRegions = 0;
2326     int numReferencedByRegions = 0;
2327 
2328     for (int j = 0; j < num; j++) {
2329       if (connections[i * num + j] > 0)
2330         numReferencedRegions++;
2331 
2332       if (connections[j * num + i] > 0)
2333         numReferencedByRegions++;
2334     }
2335 
2336     cs_regions++;                          // count each region exactly once
2337     referenced += numReferencedByRegions;  // and its incoming count exactly once
2338 
2339     if (ShenandoahHeap::heap()->regions()->get(i)->has_live()) {
2340       tty->print("Region %d is referenced by %d regions {",
2341                  i, numReferencedByRegions);
2342       int col_count = 0;
2343       for (int j = 0; j < num; j++) {
2344         int count = connections[j * num + i];
2345         if (count > 0) {
2346           col_count++;
2347           if ((col_count % 10) == 0)
2348             tty->print("\n");
2349           tty->print("%d(%d), ", j, count);
2350         }
2351       }
2352       tty->print("} \n");
2353     }
2354   }
2355 
2356   double avg = (double) referenced / (double) cs_regions;
2357   tty->print("Average number of regions scanned per region = %lf\n", avg);
2358 }
2359 
2360 class ShenandoahCountGarbageClosure : public ShenandoahHeapRegionClosure {
2361 private:
2362   size_t _garbage;
2363 public:
2364   ShenandoahCountGarbageClosure() : _garbage(0) {
2365   }
2366 
2367   bool doHeapRegion(ShenandoahHeapRegion* r) {
2368     if (! r->is_humongous() && ! r->is_pinned() && ! r->in_collection_set()) {
2369       _garbage += r->garbage();
2370     }
2371     return false;
2372   }
2373 
2374   size_t garbage() {
2375     return _garbage;
2376   }
2377 };
2378 
2379 size_t ShenandoahHeap::garbage() {
2380   ShenandoahCountGarbageClosure cl;
2381   heap_region_iterate(&cl);
2382   return cl.garbage();
2383 }
2384 
2385 #ifdef ASSERT
2386 void ShenandoahHeap::assert_heaplock_owned_by_current_thread() {
2387   assert(_heap_lock == locked, "must be locked");
2388   assert(_heap_lock_owner == Thread::current(), "must be owned by current thread");
2389 }
2390 
2391 void ShenandoahHeap::assert_heaplock_or_safepoint() {
2392   Thread* thr = Thread::current();
2393   assert((_heap_lock == locked && _heap_lock_owner == thr) ||
2394          (SafepointSynchronize::is_at_safepoint() && thr->is_VM_thread()),
2395          "must own heap lock, or be the VM thread at a safepoint");
2396 }
2397 
2398 #endif