/*
 * Copyright (c) 2013, 2015, Red Hat, Inc. and/or its affiliates.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "memory/allocation.hpp"
#include "gc/g1/heapRegionBounds.inline.hpp"

#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/parallelCleaning.hpp"

#include "gc/shenandoah/brooksPointer.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc/shenandoah/shenandoahConcurrentThread.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahHumongous.hpp"
#include "gc/shenandoah/shenandoahMarkCompact.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.hpp"
#include "gc/shenandoah/vm_operations_shenandoah.hpp"

#include "runtime/vmThread.hpp"
#include "services/mallocTracker.hpp"

const char* ShenandoahHeap::name() const {
  return "Shenandoah";
}

void ShenandoahHeap::print_heap_locations(HeapWord* start, HeapWord* end) {
  HeapWord* cur = NULL;
  for (cur = start; cur < end; cur++) {
    tty->print_cr(PTR_FORMAT " : " PTR_FORMAT, p2i(cur), p2i(*((HeapWord**) cur)));
  }
}

class PrintHeapRegionsClosure : public ShenandoahHeapRegionClosure {
private:
  outputStream* _st;
public:
  PrintHeapRegionsClosure() : _st(tty) {}
  PrintHeapRegionsClosure(outputStream* st) : _st(st) {}

  bool doHeapRegion(ShenandoahHeapRegion* r) {
    r->print_on(_st);
    return false;
  }
};

class ShenandoahPretouchTask : public AbstractGangTask {
private:
  ShenandoahHeapRegionSet* _regions;
  const size_t _bitmap_size;
  const size_t _page_size;
  char* _bitmap0_base;
  char* _bitmap1_base;
public:
  ShenandoahPretouchTask(ShenandoahHeapRegionSet* regions,
                         char* bitmap0_base, char* bitmap1_base, size_t bitmap_size,
                         size_t page_size) :
    AbstractGangTask("Shenandoah PreTouch",
                     Universe::is_fully_initialized() ? GCId::current_raw() :
                                                        // During VM initialization there is
                                                        // no GC cycle that this task can be
                                                        // associated with.
                                                        GCId::undefined()),
    _regions(regions),
    _bitmap_size(bitmap_size),
    _page_size(page_size),
    _bitmap0_base(bitmap0_base),
    _bitmap1_base(bitmap1_base) {
    _regions->clear_current_index();
  }

  virtual void work(uint worker_id) {
    ShenandoahHeapRegion* r = _regions->claim_next();
    while (r != NULL) {
      log_trace(gc, heap)("Pretouch region " SIZE_FORMAT ": " PTR_FORMAT " -> " PTR_FORMAT,
                          r->region_number(), p2i(r->bottom()), p2i(r->end()));
      os::pretouch_memory(r->bottom(), r->end(), _page_size);

      size_t start = r->region_number()       * ShenandoahHeapRegion::RegionSizeBytes / CMBitMap::heap_map_factor();
      size_t end   = (r->region_number() + 1) * ShenandoahHeapRegion::RegionSizeBytes / CMBitMap::heap_map_factor();
      assert(end <= _bitmap_size, "end is sane: " SIZE_FORMAT " <= " SIZE_FORMAT, end, _bitmap_size);

      log_trace(gc, heap)("Pretouch bitmap under region " SIZE_FORMAT ": " PTR_FORMAT " -> " PTR_FORMAT,
                          r->region_number(), p2i(_bitmap0_base + start), p2i(_bitmap0_base + end));
      os::pretouch_memory(_bitmap0_base + start, _bitmap0_base + end, _page_size);

      log_trace(gc, heap)("Pretouch bitmap under region " SIZE_FORMAT ": " PTR_FORMAT " -> " PTR_FORMAT,
                          r->region_number(), p2i(_bitmap1_base + start), p2i(_bitmap1_base + end));
      os::pretouch_memory(_bitmap1_base + start, _bitmap1_base + end, _page_size);

      r = _regions->claim_next();
    }
  }
};

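// A worked sketch of the bitmap slice arithmetic above (illustrative only;
// the 1 MB region size and map factor of 64 are assumed values, not
// necessarily what the VM uses). One bitmap byte covers heap_map_factor()
// heap bytes, so a region's slice is its byte range scaled down by that
// factor:
//
//   size_t region_size = 1 * M;                    // assumed RegionSizeBytes
//   size_t factor      = 64;                       // assumed heap_map_factor()
//   size_t start       = 3 * region_size / factor; // region 3 -> 49152
//   size_t end         = 4 * region_size / factor; //            65536
//
// Pretouching [bitmap_base + start, bitmap_base + end) from the same worker
// that pretouches the region keeps region and bitmap on the same NUMA node.
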
jint ShenandoahHeap::initialize() {
  CollectedHeap::pre_initialize();

  size_t init_byte_size = collector_policy()->initial_heap_byte_size();
  size_t max_byte_size = collector_policy()->max_heap_byte_size();

  Universe::check_alignment(max_byte_size,
                            ShenandoahHeapRegion::RegionSizeBytes,
                            "shenandoah heap");
  Universe::check_alignment(init_byte_size,
                            ShenandoahHeapRegion::RegionSizeBytes,
                            "shenandoah heap");

  ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
                                                 Arguments::conservative_max_heap_alignment());
  initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*) (heap_rs.base() + heap_rs.size()));

  set_barrier_set(new ShenandoahBarrierSet(this));
  ReservedSpace pgc_rs = heap_rs.first_part(max_byte_size);
  _storage.initialize(pgc_rs, init_byte_size);

  _num_regions = init_byte_size / ShenandoahHeapRegion::RegionSizeBytes;
  _max_regions = max_byte_size / ShenandoahHeapRegion::RegionSizeBytes;
  _initialSize = _num_regions * ShenandoahHeapRegion::RegionSizeBytes;
  size_t regionSizeWords = ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize;
  assert(init_byte_size == _initialSize, "tautology");
  _ordered_regions = new ShenandoahHeapRegionSet(_max_regions);
  _collection_set = new ShenandoahCollectionSet(_max_regions);
  _free_regions = new ShenandoahFreeSet(_max_regions);

  // Initialize fast collection set test structure.
  _in_cset_fast_test_length = _max_regions;
  _in_cset_fast_test_base =
                   NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length, mtGC);
  _in_cset_fast_test = _in_cset_fast_test_base -
               ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::RegionSizeShift);

  _next_top_at_mark_starts_base =
                   NEW_C_HEAP_ARRAY(HeapWord*, _max_regions, mtGC);
  _next_top_at_mark_starts = _next_top_at_mark_starts_base -
               ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::RegionSizeShift);

  _complete_top_at_mark_starts_base =
                   NEW_C_HEAP_ARRAY(HeapWord*, _max_regions, mtGC);
  _complete_top_at_mark_starts = _complete_top_at_mark_starts_base -
               ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::RegionSizeShift);

  size_t i = 0;
  for (i = 0; i < _num_regions; i++) {
    _in_cset_fast_test_base[i] = false; // Not in cset
    HeapWord* bottom = (HeapWord*) pgc_rs.base() + regionSizeWords * i;
    _complete_top_at_mark_starts_base[i] = bottom;
    _next_top_at_mark_starts_base[i] = bottom;
  }

  {
    ShenandoahHeapLock lock(this);
    for (i = 0; i < _num_regions; i++) {
      ShenandoahHeapRegion* current = new ShenandoahHeapRegion(this, (HeapWord*) pgc_rs.base() +
                                                               regionSizeWords * i, regionSizeWords, i);
      _free_regions->add_region(current);
      _ordered_regions->add_region(current);
    }
  }
  assert(((size_t) _ordered_regions->active_regions()) == _num_regions, "");
  _first_region = _ordered_regions->get(0);
  _first_region_bottom = _first_region->bottom();
  assert((((size_t) _first_region_bottom) &
          (ShenandoahHeapRegion::RegionSizeBytes - 1)) == 0,
         "misaligned heap: " PTR_FORMAT, p2i(_first_region_bottom));

  _numAllocs = 0;

  if (log_is_enabled(Trace, gc, region)) {
    ResourceMark rm;
    outputStream* out = Log(gc, region)::trace_stream();
    log_trace(gc, region)("All Regions");
    _ordered_regions->print(out);
    log_trace(gc, region)("Free Regions");
    _free_regions->print(out);
  }

  // The call below uses facilities (the SATB* things) that live in G1 but
  // probably belong in a shared location.
  JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
                                               SATB_Q_FL_lock,
                                               20 /* G1SATBProcessCompletedThreshold */,
                                               Shared_SATB_Q_lock);

  // Reserve space for prev and next bitmap.
  size_t bitmap_size = CMBitMap::compute_size(heap_rs.size());
  MemRegion heap_region = MemRegion((HeapWord*) heap_rs.base(), heap_rs.size() / HeapWordSize);

  size_t page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();

  ReservedSpace bitmap0(bitmap_size, page_size);
  os::commit_memory_or_exit(bitmap0.base(), bitmap0.size(), false, "couldn't allocate mark bitmap");
  MemTracker::record_virtual_memory_type(bitmap0.base(), mtGC);
  MemRegion bitmap_region0 = MemRegion((HeapWord*) bitmap0.base(), bitmap0.size() / HeapWordSize);

  ReservedSpace bitmap1(bitmap_size, page_size);
  os::commit_memory_or_exit(bitmap1.base(), bitmap1.size(), false, "couldn't allocate mark bitmap");
  MemTracker::record_virtual_memory_type(bitmap1.base(), mtGC);
  MemRegion bitmap_region1 = MemRegion((HeapWord*) bitmap1.base(), bitmap1.size() / HeapWordSize);

  if (ShenandoahAlwaysPreTouch) {
    assert(!AlwaysPreTouch, "Should have been overridden");

    // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
    // before initialize() below zeroes it with the initializing thread. For any given region,
    // we touch the region and the corresponding bitmaps from the same thread.

    log_info(gc, heap)("Parallel pretouch " SIZE_FORMAT " regions with " SIZE_FORMAT " byte pages",
                       _ordered_regions->count(), page_size);
    ShenandoahPretouchTask cl(_ordered_regions, bitmap0.base(), bitmap1.base(), bitmap_size, page_size);
    _workers->run_task(&cl);
  }

  _mark_bit_map0.initialize(heap_region, bitmap_region0);
  _complete_mark_bit_map = &_mark_bit_map0;

  _mark_bit_map1.initialize(heap_region, bitmap_region1);
  _next_mark_bit_map = &_mark_bit_map1;

  _monitoring_support = new ShenandoahMonitoringSupport(this);

  _concurrent_gc_thread = new ShenandoahConcurrentThread();

  ShenandoahMarkCompact::initialize();

  return JNI_OK;
}

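// A worked sketch of the region bookkeeping set up in initialize() (the
// numbers are assumed for illustration, not actual defaults): with a 2 GB
// initial heap, a 4 GB max heap and 1 MB regions,
//
//   num_regions = 2 * G / (1 * M);  // 2048 regions committed up front
//   max_regions = 4 * G / (1 * M);  // 4096 region slots reserved
//
// Both sizes were alignment-checked against RegionSizeBytes above, so the
// divisions are exact and init_byte_size == _initialSize holds. The
// _in_cset_fast_test and top-at-mark-start arrays are biased by the heap
// base so they can be indexed directly with (addr >> RegionSizeShift).
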
ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
  CollectedHeap(),
  _shenandoah_policy(policy),
  _concurrent_mark_in_progress(0),
  _evacuation_in_progress(0),
  _full_gc_in_progress(false),
  _free_regions(NULL),
  _collection_set(NULL),
  _bytes_allocated_since_cm(0),
  _bytes_allocated_during_cm(0),
  _max_allocated_gc(0),
  _allocated_last_gc(0),
  _used_start_gc(0),
  _max_workers(MAX2(ConcGCThreads, ParallelGCThreads)),
  _ref_processor(NULL),
  _in_cset_fast_test(NULL),
  _in_cset_fast_test_base(NULL),
  _next_top_at_mark_starts(NULL),
  _next_top_at_mark_starts_base(NULL),
  _complete_top_at_mark_starts(NULL),
  _complete_top_at_mark_starts_base(NULL),
  _mark_bit_map0(),
  _mark_bit_map1(),
  _cancelled_concgc(false),
  _need_update_refs(false),
  _need_reset_bitmaps(false),
  _heap_lock(0),
#ifdef ASSERT
  _heap_lock_owner(NULL),
#endif
  _gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer())
{
  log_info(gc, init)("Parallel GC threads: " UINT32_FORMAT, ParallelGCThreads);
  log_info(gc, init)("Concurrent GC threads: " UINT32_FORMAT, ConcGCThreads);
  log_info(gc, init)("Parallel reference processing enabled: %s", BOOL_TO_STR(ParallelRefProcEnabled));

  _scm = new ShenandoahConcurrentMark();
  _used = 0;

  _max_workers = MAX2(_max_workers, 1U);
  _workers = new ShenandoahWorkGang("Shenandoah GC Threads", _max_workers,
                            /* are_GC_task_threads */ true,
                            /* are_ConcurrentGC_threads */ false);
  if (_workers == NULL) {
    vm_exit_during_initialization("Failed necessary allocation.");
  } else {
    _workers->initialize_workers();
  }
}

class ResetNextBitmapTask : public AbstractGangTask {
private:
  ShenandoahHeapRegionSet* _regions;

public:
  ResetNextBitmapTask(ShenandoahHeapRegionSet* regions) :
    AbstractGangTask("Parallel Reset Next Bitmap Task"),
    _regions(regions) {
    _regions->clear_current_index();
  }

  void work(uint worker_id) {
    ShenandoahHeapRegion* region = _regions->claim_next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    while (region != NULL) {
      HeapWord* bottom = region->bottom();
      HeapWord* top = heap->next_top_at_mark_start(region->bottom());
      if (top > bottom) {
        heap->next_mark_bit_map()->clear_range_large(MemRegion(bottom, top));
      }
      region = _regions->claim_next();
    }
  }
};

void ShenandoahHeap::reset_next_mark_bitmap(WorkGang* workers) {
  ResetNextBitmapTask task = ResetNextBitmapTask(_ordered_regions);
  workers->run_task(&task);
}

class ResetCompleteBitmapTask : public AbstractGangTask {
private:
  ShenandoahHeapRegionSet* _regions;

public:
  ResetCompleteBitmapTask(ShenandoahHeapRegionSet* regions) :
    AbstractGangTask("Parallel Reset Complete Bitmap Task"),
    _regions(regions) {
    _regions->clear_current_index();
  }

  void work(uint worker_id) {
    ShenandoahHeapRegion* region = _regions->claim_next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    while (region != NULL) {
      HeapWord* bottom = region->bottom();
      HeapWord* top = heap->complete_top_at_mark_start(region->bottom());
      if (top > bottom) {
        heap->complete_mark_bit_map()->clear_range_large(MemRegion(bottom, top));
      }
      region = _regions->claim_next();
    }
  }
};

void ShenandoahHeap::reset_complete_mark_bitmap(WorkGang* workers) {
  ResetCompleteBitmapTask task = ResetCompleteBitmapTask(_ordered_regions);
  workers->run_task(&task);
}

bool ShenandoahHeap::is_next_bitmap_clear() {
  HeapWord* start = _ordered_regions->bottom();
  HeapWord* end = _ordered_regions->end();
  return _next_mark_bit_map->getNextMarkedWordAddress(start, end) == end;
}

bool ShenandoahHeap::is_complete_bitmap_clear_range(HeapWord* start, HeapWord* end) {
  return _complete_mark_bit_map->getNextMarkedWordAddress(start, end) == end;
}

void ShenandoahHeap::print_on(outputStream* st) const {
  st->print("Shenandoah Heap");
  st->print(" total = " SIZE_FORMAT " K, used " SIZE_FORMAT " K ", capacity() / K, used() / K);
  st->print(" [" PTR_FORMAT ", " PTR_FORMAT ") ",
            p2i(reserved_region().start()),
            p2i(reserved_region().end()));
  st->print("Region size = " SIZE_FORMAT "K ", ShenandoahHeapRegion::RegionSizeBytes / K);
  if (_concurrent_mark_in_progress) {
    st->print("marking ");
  }
  if (_evacuation_in_progress) {
    st->print("evacuating ");
  }
  if (cancelled_concgc()) {
    st->print("cancelled ");
  }
  st->print("\n");

  // Adapted from VirtualSpace::print_on(), which is non-PRODUCT only
  st->print("Virtual space:");
  if (_storage.special()) st->print(" (pinned in memory)");
  st->cr();
  st->print_cr(" - committed: " SIZE_FORMAT, _storage.committed_size());
  st->print_cr(" - reserved:  " SIZE_FORMAT, _storage.reserved_size());
  st->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  p2i(_storage.low()), p2i(_storage.high()));
  st->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  p2i(_storage.low_boundary()), p2i(_storage.high_boundary()));

  if (Verbose) {
    print_heap_regions(st);
  }
}

class InitGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    thread->gclab().initialize(true);
  }
};

void ShenandoahHeap::post_initialize() {
  if (UseTLAB) {
    // This is a very tricky point in VM lifetime. We cannot easily call Threads::threads_do
    // here, because some system threads (VMThread, WatcherThread, etc) are not yet available.
    // Their initialization should be handled separately. If we miss some threads here,
    // then any other TLAB-related activity would fail with asserts.

    InitGCLABClosure init_gclabs;
    {
      MutexLocker ml(Threads_lock);
      for (JavaThread* thread = Threads::first(); thread != NULL; thread = thread->next()) {
        init_gclabs.do_thread(thread);
      }
    }
    gc_threads_do(&init_gclabs);

    // The gclab cannot be initialized early during VM startup, as it cannot determine its max_size.
    // Instead, we let the WorkGang initialize the gclab when a new worker is created.
    _workers->set_initialize_gclab();
  }

  _scm->initialize(_max_workers);

  ref_processing_init();
}

class CalculateUsedRegionClosure : public ShenandoahHeapRegionClosure {
  size_t sum;
public:

  CalculateUsedRegionClosure() {
    sum = 0;
  }

  bool doHeapRegion(ShenandoahHeapRegion* r) {
    sum = sum + r->used();
    return false;
  }

  size_t getResult() { return sum; }
};

size_t ShenandoahHeap::calculateUsed() {
  CalculateUsedRegionClosure cl;
  heap_region_iterate(&cl);
  return cl.getResult();
}

void ShenandoahHeap::verify_heap_size_consistency() {
  assert(calculateUsed() == used(),
         "heap used size must be consistent: heap-used = " SIZE_FORMAT ", regions-used = " SIZE_FORMAT, used(), calculateUsed());
}

size_t ShenandoahHeap::used() const {
  OrderAccess::acquire();
  return _used;
}

void ShenandoahHeap::increase_used(size_t bytes) {
  assert_heaplock_or_safepoint();
  _used += bytes;
}

void ShenandoahHeap::set_used(size_t bytes) {
  assert_heaplock_or_safepoint();
  _used = bytes;
}

void ShenandoahHeap::decrease_used(size_t bytes) {
  assert_heaplock_or_safepoint();
  assert(_used >= bytes, "never decrease heap size by more than we've left");
  _used -= bytes;
}

size_t ShenandoahHeap::capacity() const {
  return _num_regions * ShenandoahHeapRegion::RegionSizeBytes;
}

bool ShenandoahHeap::is_maximal_no_gc() const {
  Unimplemented();
  return true;
}

size_t ShenandoahHeap::max_capacity() const {
  return _max_regions * ShenandoahHeapRegion::RegionSizeBytes;
}

size_t ShenandoahHeap::min_capacity() const {
  return _initialSize;
}

VirtualSpace* ShenandoahHeap::storage() const {
  return (VirtualSpace*) &_storage;
}

bool ShenandoahHeap::is_in(const void* p) const {
  HeapWord* first_region_bottom = _first_region->bottom();
  HeapWord* last_region_end = first_region_bottom + (ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize) * _num_regions;
  return p >= _first_region_bottom && p < last_region_end;
}

bool ShenandoahHeap::is_scavengable(const void* p) {
  return true;
}

HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
  // Retain tlab and allocate object in shared space if
  // the amount free in the tlab is too large to discard.
  if (thread->gclab().free() > thread->gclab().refill_waste_limit()) {
    thread->gclab().record_slow_allocation(size);
    return NULL;
  }

  // Discard gclab and allocate a new one.
  // To minimize fragmentation, the last GCLAB may be smaller than the rest.
  size_t new_gclab_size = thread->gclab().compute_size(size);

  thread->gclab().clear_before_allocation();

  if (new_gclab_size == 0) {
    return NULL;
  }

  // Allocate a new GCLAB...
  HeapWord* obj = allocate_new_gclab(new_gclab_size);
  if (obj == NULL) {
    return NULL;
  }

  if (ZeroTLAB) {
    // ...and clear it.
    Copy::zero_to_words(obj, new_gclab_size);
  } else {
    // ...and zap just allocated object.
#ifdef ASSERT
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(obj + hdr_size, new_gclab_size - hdr_size, badHeapWordVal);
#endif // ASSERT
  }
  thread->gclab().fill(obj, obj + size, new_gclab_size);
  return obj;
}

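// A sketch of the retain-vs-refill decision above (illustrative numbers; the
// 64 KB lab and 1/64 waste ratio are assumptions, not the VM's actual
// tuning). With refill_waste_limit() == lab_size / 64:
//
//   waste_limit = 64 * K / 64;            // 1 KB
//   if (gclab.free() > waste_limit) ...   // lab mostly unused: keep it and
//                                         // allocate this object shared
//   else ...                              // lab nearly full: retire it and
//                                         // allocate a fresh one
//
// Retiring early wastes the unused tail of the lab, while retaining a
// nearly-full lab would push every subsequent small allocation through this
// slow path.
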
HeapWord* ShenandoahHeap::allocate_new_tlab(size_t word_size) {
  return allocate_new_tlab(word_size, false);
}

HeapWord* ShenandoahHeap::allocate_new_gclab(size_t word_size) {
  return allocate_new_tlab(word_size, true);
}

HeapWord* ShenandoahHeap::allocate_new_tlab(size_t word_size, bool evacuating) {
  HeapWord* result = allocate_memory(word_size, evacuating);

  if (result != NULL) {
    assert(! in_collection_set(result), "Never allocate in dirty region");
    _bytes_allocated_since_cm += word_size * HeapWordSize;

    log_develop_trace(gc, tlab)("allocating new tlab of size " SIZE_FORMAT " at addr " PTR_FORMAT, word_size, p2i(result));
  }
  return result;
}

ShenandoahHeap* ShenandoahHeap::heap() {
  CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to ShenandoahHeap::heap()");
  assert(heap->kind() == CollectedHeap::ShenandoahHeap, "not a shenandoah heap");
  return (ShenandoahHeap*) heap;
}

ShenandoahHeap* ShenandoahHeap::heap_no_check() {
  CollectedHeap* heap = Universe::heap();
  return (ShenandoahHeap*) heap;
}

HeapWord* ShenandoahHeap::allocate_memory_work(size_t word_size) {
  ShenandoahHeapLock heap_lock(this);

  HeapWord* result = allocate_memory_under_lock(word_size);
  size_t grow_by = (word_size * HeapWordSize + ShenandoahHeapRegion::RegionSizeBytes - 1) / ShenandoahHeapRegion::RegionSizeBytes;

  while (result == NULL && _num_regions + grow_by <= _max_regions) {
    grow_heap_by(grow_by);
    result = allocate_memory_under_lock(word_size);
  }

  return result;
}

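// grow_by above is a ceiling division, so a request that only slightly
// overflows the committed space still grows the heap far enough. A worked
// example with an assumed 1 MB region size:
//
//   size_t bytes   = word_size * HeapWordSize;           // say ~2.5 MB
//   size_t grow_by = (bytes + (1 * M) - 1) / (1 * M);    // 3 regions
//
// The loop then alternates grow_heap_by() with a retried allocation until it
// either succeeds or further expansion would exceed _max_regions.
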
HeapWord* ShenandoahHeap::allocate_memory(size_t word_size, bool evacuating) {
  HeapWord* result = allocate_memory_work(word_size);

  if (!evacuating) {
    // Allocation failed, try full-GC, then retry allocation.
    //
    // It might happen that one of the threads requesting allocation would unblock
    // way later after full-GC happened, only to fail the second allocation, because
    // other threads have already depleted the free storage. In this case, a better
    // strategy would be to try full-GC again.
    //
    // Lacking the way to detect progress from the "collect" call, we are left with blindly
    // retrying for some bounded number of times.
    // TODO: Poll if Full GC made enough progress to warrant retry.
    int tries = 0;
    while ((result == NULL) && (tries++ < ShenandoahFullGCTries)) {
      log_debug(gc)("[" PTR_FORMAT "] Failed to allocate " SIZE_FORMAT " bytes, doing full GC, try %d",
                    p2i(Thread::current()), word_size * HeapWordSize, tries);
      collect(GCCause::_allocation_failure);
      result = allocate_memory_work(word_size);
    }
  }

  // Only update monitoring counters when not calling from a write-barrier.
  // Otherwise we might attempt to grab the Service_lock, which we must
  // not do when coming from a write-barrier (because the thread might
  // already hold the Compile_lock).
  if (! evacuating) {
    monitoring_support()->update_counters();
  }

  log_develop_trace(gc, alloc)("allocate memory chunk of size " SIZE_FORMAT " at addr " PTR_FORMAT " by thread %d ",
                               word_size, p2i(result), Thread::current()->osthread()->thread_id());

  return result;
}

bool ShenandoahHeap::call_from_write_barrier(bool evacuating) {
  return evacuating && Thread::current()->is_Java_thread();
}

HeapWord* ShenandoahHeap::allocate_memory_under_lock(size_t word_size) {
  assert_heaplock_owned_by_current_thread();

  if (word_size * HeapWordSize > ShenandoahHeapRegion::RegionSizeBytes) {
    return allocate_large_memory(word_size);
  }

  // Not enough memory in free region set.
  // Coming out of full GC, it is possible that there is no
  // free region available, so current_index may not be valid.
  if (word_size * HeapWordSize > _free_regions->capacity()) return NULL;

  ShenandoahHeapRegion* my_current_region = _free_regions->current_no_humongous();

  if (my_current_region == NULL) {
    return NULL; // No more room to make a new region. OOM.
  }
  assert(my_current_region != NULL, "should have a region at this point");

#ifdef ASSERT
  if (in_collection_set(my_current_region)) {
    print_heap_regions();
  }
#endif
  assert(! in_collection_set(my_current_region), "never get targeted regions in free-lists");
  assert(! my_current_region->is_humongous(), "never attempt to allocate from humongous object regions");

  HeapWord* result = my_current_region->allocate(word_size);

  while (result == NULL) {
    // 2nd attempt. Try next region.
    _free_regions->increase_used(my_current_region->free());
    ShenandoahHeapRegion* next_region = _free_regions->next_no_humongous();
    assert(next_region != my_current_region, "must not get current again");
    my_current_region = next_region;

    if (my_current_region == NULL) {
      return NULL; // No more room to make a new region. OOM.
    }
    assert(my_current_region != NULL, "should have a region at this point");
    assert(! in_collection_set(my_current_region), "never get targeted regions in free-lists");
    assert(! my_current_region->is_humongous(), "never attempt to allocate from humongous object regions");
    result = my_current_region->allocate(word_size);
  }

  my_current_region->increase_live_data_words(word_size);
  increase_used(word_size * HeapWordSize);
  _free_regions->increase_used(word_size * HeapWordSize);
  return result;
}

HeapWord* ShenandoahHeap::allocate_large_memory(size_t words) {
  assert_heaplock_owned_by_current_thread();

  uint required_regions = ShenandoahHumongous::required_regions(words * HeapWordSize);
  if (required_regions > _max_regions) return NULL;

  ShenandoahHeapRegion* r = _free_regions->allocate_contiguous(required_regions);

  HeapWord* result = NULL;

  if (r != NULL) {
    result = r->bottom();

    log_debug(gc, humongous)("allocating humongous object of size: " SIZE_FORMAT " KB at location " PTR_FORMAT " in start region " SIZE_FORMAT,
                             (words * HeapWordSize) / K, p2i(result), r->region_number());
  } else {
    log_debug(gc, humongous)("allocating humongous object of size: " SIZE_FORMAT " KB at location " PTR_FORMAT " failed",
                             (words * HeapWordSize) / K, p2i(result));
  }

  return result;
}

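// The same ceiling-division pattern decides how many contiguous regions a
// humongous allocation needs (sizes assumed for illustration): with 1 MB
// regions, a 2.3 MB object (payload plus Brooks pointer word) needs
//
//   required_regions = (2.3 MB + 1 MB - 1) / 1 MB;  // = 3
//
// allocate_contiguous() must find that many physically adjacent free
// regions, so the allocation can fail from fragmentation even when total
// free space would suffice.
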
HeapWord* ShenandoahHeap::mem_allocate(size_t size,
                                       bool* gc_overhead_limit_was_exceeded) {

#ifdef ASSERT
  if (ShenandoahVerify && _numAllocs > 1000000) {
    _numAllocs = 0;
  }
  _numAllocs++;
#endif
  HeapWord* filler = allocate_memory(BrooksPointer::word_size() + size, false);
  if (filler != NULL) {
    HeapWord* result = filler + BrooksPointer::word_size();
    BrooksPointer::initialize(oop(result));
    _bytes_allocated_since_cm += size * HeapWordSize;

    assert(! in_collection_set(result), "never allocate in targeted region");
    return result;
  } else {
    /*
    tty->print_cr("Out of memory. Requested number of words: " SIZE_FORMAT " used heap: " INT64_FORMAT ", bytes allocated since last CM: " INT64_FORMAT,
                  size, used(), _bytes_allocated_since_cm);
    {
      print_heap_regions();
      tty->print("Printing " SIZE_FORMAT " free regions:\n", _free_regions->count());
      _free_regions->print();
    }
    */
    return NULL;
  }
}

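// Object layout implied by mem_allocate() above: every object is preceded by
// a Brooks forwarding pointer, so the heap request is word_size() words
// larger than the object and the returned oop points past the filler:
//
//   +-------------------+------------------------------+
//   | forwarding word   | object payload (size words)  |
//   +-------------------+------------------------------+
//   ^ filler            ^ result (the oop handed out)
//
// BrooksPointer::initialize() points the forwarding word back at the object
// itself; it is redirected only when the object is evacuated to its new copy.
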
class ParallelEvacuateRegionObjectClosure : public ObjectClosure {
private:
  ShenandoahHeap* _heap;
  Thread* _thread;
public:
  ParallelEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
    _heap(heap), _thread(Thread::current()) {
  }

  void do_object(oop p) {
    log_develop_trace(gc, compaction)("Calling ParallelEvacuateRegionObjectClosure on " PTR_FORMAT " of size %d",
                                      p2i((HeapWord*) p), p->size());

    assert(_heap->is_marked_complete(p), "expect only marked objects");
    if (oopDesc::unsafe_equals(p, ShenandoahBarrierSet::resolve_oop_static_not_null(p))) {
      _heap->evacuate_object(p, _thread);
    }
  }
};

#ifdef ASSERT
class VerifyEvacuatedObjectClosure : public ObjectClosure {

public:

  void do_object(oop p) {
    if (ShenandoahHeap::heap()->is_marked_complete(p)) {
      oop p_prime = oopDesc::bs()->read_barrier(p);
      assert(! oopDesc::unsafe_equals(p, p_prime), "Should point to evacuated copy");
      if (p->klass() != p_prime->klass()) {
        tty->print_cr("copy has different class than original:");
        p->klass()->print_on(tty);
        p_prime->klass()->print_on(tty);
      }
      assert(p->klass() == p_prime->klass(), "Should have the same class p: " PTR_FORMAT ", p_prime: " PTR_FORMAT, p2i(p), p2i(p_prime));
      //      assert(p->mark() == p_prime->mark(), "Should have the same mark");
      assert(p->size() == p_prime->size(), "Should be the same size");
      assert(oopDesc::unsafe_equals(p_prime, oopDesc::bs()->read_barrier(p_prime)), "Only forward once");
    }
  }
};

void ShenandoahHeap::verify_evacuated_region(ShenandoahHeapRegion* from_region) {
  VerifyEvacuatedObjectClosure verify_evacuation;
  marked_object_iterate(from_region, &verify_evacuation);
}
#endif

void ShenandoahHeap::parallel_evacuate_region(ShenandoahHeapRegion* from_region) {
  assert(from_region->has_live(), "all-garbage regions are reclaimed earlier");

  ParallelEvacuateRegionObjectClosure evacuate_region(this);

  marked_object_iterate(from_region, &evacuate_region);

#ifdef ASSERT
  if (ShenandoahVerify && ! cancelled_concgc()) {
    verify_evacuated_region(from_region);
  }
#endif
}

class ParallelEvacuationTask : public AbstractGangTask {
private:
  ShenandoahHeap* _sh;
  ShenandoahCollectionSet* _cs;

public:
  ParallelEvacuationTask(ShenandoahHeap* sh,
                         ShenandoahCollectionSet* cs) :
    AbstractGangTask("Parallel Evacuation Task"),
    _sh(sh),
    _cs(cs) {}

  void work(uint worker_id) {
    ShenandoahHeapRegion* from_hr = _cs->claim_next();

    while (from_hr != NULL) {
      log_develop_trace(gc, region)("Thread " UINT32_FORMAT " claimed Heap Region " SIZE_FORMAT,
                                    worker_id,
                                    from_hr->region_number());

      assert(from_hr->has_live(), "all-garbage regions are reclaimed early");
      _sh->parallel_evacuate_region(from_hr);

      if (_sh->cancelled_concgc()) {
        log_develop_trace(gc, region)("Cancelled concgc while evacuating region " SIZE_FORMAT, from_hr->region_number());
        break;
      }
      from_hr = _cs->claim_next();
    }
  }
};

class RecycleDirtyRegionsClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* _heap;
  size_t _bytes_reclaimed;
public:
  RecycleDirtyRegionsClosure() : _heap(ShenandoahHeap::heap()), _bytes_reclaimed(0) {}

  bool doHeapRegion(ShenandoahHeapRegion* r) {
    assert(! _heap->cancelled_concgc(), "no recycling after cancelled marking");

    if (_heap->in_collection_set(r)) {
      log_develop_trace(gc, region)("Recycling region " SIZE_FORMAT ":", r->region_number());
      _heap->decrease_used(r->used());
      _bytes_reclaimed += r->used();
      r->recycle();
    }

    return false;
  }
  size_t bytes_reclaimed() { return _bytes_reclaimed; }
  void clear_bytes_reclaimed() { _bytes_reclaimed = 0; }
};

void ShenandoahHeap::recycle_dirty_regions() {
  RecycleDirtyRegionsClosure cl;
  cl.clear_bytes_reclaimed();

  heap_region_iterate(&cl);

  _shenandoah_policy->record_bytes_reclaimed(cl.bytes_reclaimed());
  if (! cancelled_concgc()) {
    clear_cset_fast_test();
  }
}

ShenandoahFreeSet* ShenandoahHeap::free_regions() {
  return _free_regions;
}

void ShenandoahHeap::print_heap_regions(outputStream* st) const {
  _ordered_regions->print(st);
}

class PrintAllRefsOopClosure: public ExtendedOopClosure {
private:
  int _index;
  const char* _prefix;

public:
  PrintAllRefsOopClosure(const char* prefix) : _index(0), _prefix(prefix) {}

private:
  template <class T>
  inline void do_oop_work(T* p) {
    oop o = oopDesc::load_decode_heap_oop(p);
    if (o != NULL) {
      if (ShenandoahHeap::heap()->is_in(o) && o->is_oop()) {
        tty->print_cr("%s " INT32_FORMAT " (" PTR_FORMAT ") -> " PTR_FORMAT " (marked: %s) (%s " PTR_FORMAT ")",
                      _prefix, _index,
                      p2i(p), p2i(o),
                      BOOL_TO_STR(ShenandoahHeap::heap()->is_marked_complete(o)),
                      o->klass()->internal_name(), p2i(o->klass()));
      } else {
        tty->print_cr("%s " INT32_FORMAT " (" PTR_FORMAT ") -> " PTR_FORMAT " (not in heap, possibly corrupted or dirty)",
                      _prefix, _index,
                      p2i(p), p2i(o));
      }
    } else {
      tty->print_cr("%s " INT32_FORMAT " (" PTR_FORMAT ") -> " PTR_FORMAT, _prefix, _index, p2i(p), p2i((HeapWord*) o));
    }
    _index++;
  }

public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }

  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

class PrintAllRefsObjectClosure : public ObjectClosure {
  const char* _prefix;

public:
  PrintAllRefsObjectClosure(const char* prefix) : _prefix(prefix) {}

  void do_object(oop p) {
    if (ShenandoahHeap::heap()->is_in(p)) {
      tty->print_cr("%s object " PTR_FORMAT " (marked: %s) (%s " PTR_FORMAT ") refers to:",
                    _prefix, p2i(p),
                    BOOL_TO_STR(ShenandoahHeap::heap()->is_marked_complete(p)),
                    p->klass()->internal_name(), p2i(p->klass()));
      PrintAllRefsOopClosure cl(_prefix);
      p->oop_iterate(&cl);
    }
  }
};

void ShenandoahHeap::print_all_refs(const char* prefix) {
  tty->print_cr("printing all references in the heap");
  tty->print_cr("root references:");

  ensure_parsability(false);

  PrintAllRefsOopClosure cl(prefix);
  roots_iterate(&cl);

  tty->print_cr("heap references:");
  PrintAllRefsObjectClosure cl2(prefix);
  object_iterate(&cl2);
}

class VerifyAfterMarkingOopClosure: public ExtendedOopClosure {
private:
  ShenandoahHeap*  _heap;

public:
  VerifyAfterMarkingOopClosure() :
    _heap(ShenandoahHeap::heap()) { }

private:
  template <class T>
  inline void do_oop_work(T* p) {
    oop o = oopDesc::load_decode_heap_oop(p);
    if (o != NULL) {
      if (! _heap->is_marked_complete(o)) {
        _heap->print_heap_regions();
        _heap->print_all_refs("post-mark");
        tty->print_cr("oop not marked, although referrer is marked: " PTR_FORMAT ": in_heap: %s, is_marked: %s",
                      p2i((HeapWord*) o), BOOL_TO_STR(_heap->is_in(o)), BOOL_TO_STR(_heap->is_marked_complete(o)));
        _heap->print_heap_locations((HeapWord*) o, (HeapWord*) o + o->size());

        tty->print_cr("oop class: %s", o->klass()->internal_name());
        if (_heap->is_in(p)) {
          oop referrer = oop(_heap->heap_region_containing(p)->block_start_const(p));
          tty->print_cr("Referrer starts at addr " PTR_FORMAT, p2i((HeapWord*) referrer));
          referrer->print();
          _heap->print_heap_locations((HeapWord*) referrer, (HeapWord*) referrer + referrer->size());
        }
        tty->print_cr("heap region containing object:");
        _heap->heap_region_containing(o)->print();
        tty->print_cr("heap region containing referrer:");
        _heap->heap_region_containing(p)->print();
        tty->print_cr("heap region containing forwardee:");
        _heap->heap_region_containing(oopDesc::bs()->read_barrier(o))->print();
      }
      assert(o->is_oop(), "oop must be an oop");
      assert(Metaspace::contains(o->klass()), "klass pointer must go to metaspace");
      if (! oopDesc::unsafe_equals(o, oopDesc::bs()->read_barrier(o))) {
        tty->print_cr("oop has forwardee: p: " PTR_FORMAT " (%s), o = " PTR_FORMAT " (%s), new-o: " PTR_FORMAT " (%s)",
                      p2i(p),
                      BOOL_TO_STR(_heap->in_collection_set(p)),
                      p2i(o),
                      BOOL_TO_STR(_heap->in_collection_set(o)),
                      p2i((HeapWord*) oopDesc::bs()->read_barrier(o)),
                      BOOL_TO_STR(_heap->in_collection_set(oopDesc::bs()->read_barrier(o))));
        tty->print_cr("oop class: %s", o->klass()->internal_name());
      }
      assert(oopDesc::unsafe_equals(o, oopDesc::bs()->read_barrier(o)), "oops must not be forwarded");
      assert(! _heap->in_collection_set(o), "references must not point to dirty heap regions");
      assert(_heap->is_marked_complete(o), "live oops must be marked current");
    }
  }

public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }

  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

void ShenandoahHeap::verify_heap_after_marking() {
  verify_heap_size_consistency();

  log_trace(gc)("verifying heap after marking");

  VerifyAfterMarkingOopClosure cl;
  roots_iterate(&cl);
  ObjectToOopClosure objs(&cl);
  object_iterate(&objs);
}

void ShenandoahHeap::reclaim_humongous_region_at(ShenandoahHeapRegion* r) {
  assert(r->is_humongous_start(), "reclaim regions starting with the first one");

  oop humongous_obj = oop(r->bottom() + BrooksPointer::word_size());
  size_t size = humongous_obj->size() + BrooksPointer::word_size();
  uint required_regions = ShenandoahHumongous::required_regions(size * HeapWordSize);
  uint index = r->region_number();

  assert(!r->has_live(), "liveness must be zero");

  for (size_t i = 0; i < required_regions; i++) {
    ShenandoahHeapRegion* region = _ordered_regions->get(index++);

    assert((region->is_humongous_start() || region->is_humongous_continuation()),
           "expect correct humongous start or continuation");

    if (log_is_enabled(Debug, gc, humongous)) {
      log_debug(gc, humongous)("reclaiming " UINT32_FORMAT " humongous regions for object of size: " SIZE_FORMAT " words", required_regions, size);
      ResourceMark rm;
      outputStream* out = Log(gc, humongous)::debug_stream();
      region->print_on(out);
    }

    region->recycle();
    ShenandoahHeap::heap()->decrease_used(ShenandoahHeapRegion::RegionSizeBytes);
  }
}

class ShenandoahReclaimHumongousRegionsClosure : public ShenandoahHeapRegionClosure {

  bool doHeapRegion(ShenandoahHeapRegion* r) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();

    if (r->is_humongous_start()) {
      oop humongous_obj = oop(r->bottom() + BrooksPointer::word_size());
      if (! heap->is_marked_complete(humongous_obj)) {
        heap->reclaim_humongous_region_at(r);
      }
    }
    return false;
  }
};

#ifdef ASSERT
class CheckCollectionSetClosure: public ShenandoahHeapRegionClosure {
  bool doHeapRegion(ShenandoahHeapRegion* r) {
    assert(! ShenandoahHeap::heap()->in_collection_set(r), "Should have been cleared by now");
    return false;
  }
};
#endif

void ShenandoahHeap::prepare_for_concurrent_evacuation() {
  assert(_ordered_regions->get(0)->region_number() == 0, "FIXME CHF. FIXME CHF!");

  log_develop_trace(gc)("Thread %d started prepare_for_concurrent_evacuation", Thread::current()->osthread()->thread_id());

  if (!cancelled_concgc()) {
    recycle_dirty_regions();

    ensure_parsability(true);

#ifdef ASSERT
    if (ShenandoahVerify) {
      verify_heap_after_marking();
    }
#endif

    // NOTE: This needs to be done during a stop the world pause, because
    // putting regions into the collection set concurrently with Java threads
    // will create a race. In particular, acmp could fail because when we
    // resolve the first operand, the containing region might not yet be in
    // the collection set, and thus return the original oop. When the 2nd
    // operand gets resolved, the region could be in the collection set
    // and the oop gets evacuated. If both operands have originally been
    // the same, we get false negatives.

    {
      ShenandoahHeapLock lock(this);
      _collection_set->clear();
      _free_regions->clear();

      ShenandoahReclaimHumongousRegionsClosure reclaim;
      heap_region_iterate(&reclaim);

#ifdef ASSERT
      CheckCollectionSetClosure ccsc;
      _ordered_regions->heap_region_iterate(&ccsc);
#endif

      if (UseShenandoahMatrix) {
        int num = (int) num_regions();
        int* connections = NEW_C_HEAP_ARRAY(int, num * num, mtGC);
        calculate_matrix(connections);
        print_matrix(connections);
        _shenandoah_policy->choose_collection_set(_collection_set, connections);
        FREE_C_HEAP_ARRAY(int, connections);
      } else {
        _shenandoah_policy->choose_collection_set(_collection_set);
      }

      _shenandoah_policy->choose_free_set(_free_regions);
    }

    if (UseShenandoahMatrix) {
      _collection_set->print();
    }

    _bytes_allocated_since_cm = 0;

    Universe::update_heap_info_at_gc();
  }
}

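// An illustration of the acmp race the NOTE above guards against (pseudo-code
// in comments, not VM source): if regions entered the collection set while
// Java threads run, two reads of the same reference could resolve
// differently:
//
//   oop a = read_barrier(load(x));  // region not in cset yet: from-space copy
//   /* region added to cset; object evacuated concurrently */
//   oop b = read_barrier(load(x));  // now resolves to the to-space copy
//   if (a == b) ...                 // false for the same logical object
//
// Building the collection set only inside a safepoint (and under the heap
// lock) rules this interleaving out.
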
class RetireTLABClosure : public ThreadClosure {
private:
  bool _retire;

public:
  RetireTLABClosure(bool retire) : _retire(retire) {
  }

  void do_thread(Thread* thread) {
    thread->gclab().make_parsable(_retire);
  }
};

void ShenandoahHeap::ensure_parsability(bool retire_tlabs) {
  if (UseTLAB) {
    CollectedHeap::ensure_parsability(retire_tlabs);
    RetireTLABClosure cl(retire_tlabs);
    Threads::threads_do(&cl);
  }
}

class ShenandoahEvacuateUpdateRootsClosure: public ExtendedOopClosure {
private:
  ShenandoahHeap* _heap;
  Thread* _thread;
public:
  ShenandoahEvacuateUpdateRootsClosure() :
    _heap(ShenandoahHeap::heap()), _thread(Thread::current()) {
  }

private:
  template <class T>
  void do_oop_work(T* p) {
    assert(_heap->is_evacuation_in_progress(), "Only do this when evacuation is in progress");

    T o = oopDesc::load_heap_oop(p);
    if (! oopDesc::is_null(o)) {
      oop obj = oopDesc::decode_heap_oop_not_null(o);
      if (_heap->in_collection_set(obj)) {
        assert(_heap->is_marked_complete(obj), "only evacuate marked objects %d %d",
               _heap->is_marked_complete(obj), _heap->is_marked_complete(ShenandoahBarrierSet::resolve_oop_static_not_null(obj)));
        oop resolved = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
        if (oopDesc::unsafe_equals(resolved, obj)) {
          resolved = _heap->evacuate_object(obj, _thread);
        }
        oopDesc::encode_store_heap_oop(p, resolved);
      }
    }
#ifdef ASSERT
    else {
      // tty->print_cr("not updating root at: " PTR_FORMAT " with object: " PTR_FORMAT ", is_in_heap: %s, is_in_cset: %s, is_marked: %s",
      //               p2i(p),
      //               p2i((HeapWord*) obj),
      //               BOOL_TO_STR(_heap->is_in(obj)),
      //               BOOL_TO_STR(_heap->in_cset_fast_test(obj)),
      //               BOOL_TO_STR(_heap->is_marked_complete(obj)));
    }
#endif
  }

public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }
  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

class ShenandoahEvacuateUpdateRootsTask : public AbstractGangTask {
  ShenandoahRootEvacuator* _rp;
public:

  ShenandoahEvacuateUpdateRootsTask(ShenandoahRootEvacuator* rp) :
    AbstractGangTask("Shenandoah evacuate and update roots"),
    _rp(rp)
  {
    // Nothing else to do.
  }

  void work(uint worker_id) {
    ShenandoahEvacuateUpdateRootsClosure cl;
    MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);

    _rp->process_evacuate_roots(&cl, &blobsCl, worker_id);
  }
};

void ShenandoahHeap::evacuate_and_update_roots() {
  COMPILER2_PRESENT(DerivedPointerTable::clear());

#ifdef ASSERT
  if (ShenandoahVerifyReadsToFromSpace) {
    set_from_region_protection(false);
  }
#endif

  assert(SafepointSynchronize::is_at_safepoint(), "Only iterate roots while world is stopped");
  ClassLoaderDataGraph::clear_claimed_marks();

  {
    ShenandoahRootEvacuator rp(this, workers()->active_workers(), ShenandoahCollectorPolicy::evac_thread_roots);
    ShenandoahEvacuateUpdateRootsTask roots_task(&rp);
    workers()->run_task(&roots_task);
  }

#ifdef ASSERT
  if (ShenandoahVerifyReadsToFromSpace) {
    set_from_region_protection(true);
  }
#endif

  COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
}

void ShenandoahHeap::do_evacuation() {
  parallel_evacuate();

  if (ShenandoahVerify && ! cancelled_concgc()) {
    VM_ShenandoahVerifyHeapAfterEvacuation verify_after_evacuation;
    if (Thread::current()->is_VM_thread()) {
      verify_after_evacuation.doit();
    } else {
      VMThread::execute(&verify_after_evacuation);
    }
  }
}

void ShenandoahHeap::parallel_evacuate() {
  log_develop_trace(gc)("starting parallel_evacuate");

  _shenandoah_policy->record_phase_start(ShenandoahCollectorPolicy::conc_evac);

  if (log_is_enabled(Trace, gc, region)) {
    ResourceMark rm;
    outputStream* out = Log(gc, region)::trace_stream();
    out->print_cr("Printing all available regions");
    print_heap_regions(out);
  }

  if (log_is_enabled(Trace, gc, cset)) {
    ResourceMark rm;
    outputStream* out = Log(gc, cset)::trace_stream();
    out->print_cr("Printing collection set which contains " SIZE_FORMAT " regions:", _collection_set->count());
    _collection_set->print(out);

    out->print_cr("Printing free set which contains " SIZE_FORMAT " regions:", _free_regions->count());
    _free_regions->print(out);
  }

  ParallelEvacuationTask evacuationTask = ParallelEvacuationTask(this, _collection_set);

  workers()->run_task(&evacuationTask);

  if (log_is_enabled(Trace, gc, cset)) {
    ResourceMark rm;
    outputStream* out = Log(gc, cset)::trace_stream();
    out->print_cr("Printing postgc collection set which contains " SIZE_FORMAT " regions:",
                  _collection_set->count());
    _collection_set->print(out);

    out->print_cr("Printing postgc free regions which contain " SIZE_FORMAT " free regions:",
                  _free_regions->count());
    _free_regions->print(out);
  }

  if (log_is_enabled(Trace, gc, region)) {
    ResourceMark rm;
    outputStream* out = Log(gc, region)::trace_stream();
    out->print_cr("all regions after evacuation:");
    print_heap_regions(out);
  }

  _shenandoah_policy->record_phase_end(ShenandoahCollectorPolicy::conc_evac);
}

class VerifyEvacuationClosure: public ExtendedOopClosure {
private:
  ShenandoahHeap*  _heap;
  ShenandoahHeapRegion* _from_region;

public:
  VerifyEvacuationClosure(ShenandoahHeapRegion* from_region) :
    _heap(ShenandoahHeap::heap()), _from_region(from_region) { }
private:
  template <class T>
  inline void do_oop_work(T* p) {
    oop heap_oop = oopDesc::load_decode_heap_oop(p);
    if (! oopDesc::is_null(heap_oop)) {
      guarantee(! _from_region->is_in(heap_oop), "no references to from-region allowed after evacuation: " PTR_FORMAT, p2i((HeapWord*) heap_oop));
    }
  }

public:
  void do_oop(oop* p)       {
    do_oop_work(p);
  }

  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

void ShenandoahHeap::roots_iterate(OopClosure* cl) {
  assert(SafepointSynchronize::is_at_safepoint(), "Only iterate roots while world is stopped");

  CodeBlobToOopClosure blobsCl(cl, false);
  CLDToOopClosure cldCl(cl);

  ClassLoaderDataGraph::clear_claimed_marks();

  ShenandoahRootProcessor rp(this, 1);
  rp.process_all_roots(cl, NULL, &cldCl, &blobsCl, 0);
}

void ShenandoahHeap::verify_evacuation(ShenandoahHeapRegion* from_region) {
  VerifyEvacuationClosure rootsCl(from_region);
  roots_iterate(&rootsCl);
}

bool ShenandoahHeap::supports_tlab_allocation() const {
  return true;
}

size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread* thread) const {
  size_t idx = _free_regions->current_index();
  ShenandoahHeapRegion* current = _free_regions->get(idx);
  if (current == NULL) {
    return 0;
  } else if (current->free() > MinTLABSize) {
    // Current region has enough space left, can use it.
    return current->free();
  } else {
    // No more space in current region, we will take next free region
    // on the next TLAB allocation.
    return ShenandoahHeapRegion::RegionSizeBytes;
  }
}

size_t ShenandoahHeap::max_tlab_size() const {
  return ShenandoahHeapRegion::RegionSizeBytes;
}

1426 class ResizeGCLABClosure : public ThreadClosure {
1427 public:
1428   void do_thread(Thread* thread) {
1429     thread->gclab().resize();
1430   }
1431 };
1432 
1433 void ShenandoahHeap::resize_all_tlabs() {
1434   CollectedHeap::resize_all_tlabs();
1435 
1436   ResizeGCLABClosure cl;
1437   Threads::threads_do(&cl);
1438 }
1439 
1440 class AccumulateStatisticsGCLABClosure : public ThreadClosure {
1441 public:
1442   void do_thread(Thread* thread) {
1443     thread->gclab().accumulate_statistics();
1444     thread->gclab().initialize_statistics();
1445   }
1446 };
1447 
1448 void ShenandoahHeap::accumulate_statistics_all_gclabs() {
1449   AccumulateStatisticsGCLABClosure cl;
1450   Threads::threads_do(&cl);
1451 }
1452 
1453 bool  ShenandoahHeap::can_elide_tlab_store_barriers() const {
1454   return true;
1455 }
1456 
1457 oop ShenandoahHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) {
1458   // Overridden to do nothing.
1459   return new_obj;
1460 }
1461 
1462 bool  ShenandoahHeap::can_elide_initializing_store_barrier(oop new_obj) {
1463   return true;
1464 }
1465 
1466 bool ShenandoahHeap::card_mark_must_follow_store() const {
1467   return false;
1468 }
1469 
1470 void ShenandoahHeap::collect(GCCause::Cause cause) {
1471   assert(cause != GCCause::_gc_locker, "no JNI critical callback");
1472   if (GCCause::is_user_requested_gc(cause)) {
1473     if (! DisableExplicitGC) {
1474       _concurrent_gc_thread->do_full_gc(cause);
1475     }
1476   } else if (cause == GCCause::_allocation_failure) {
1477     collector_policy()->set_should_clear_all_soft_refs(true);
1478     _concurrent_gc_thread->do_full_gc(cause);
1479   }
1480 }
1481 
1482 void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {
1483   // No-op: full collections are scheduled via collect() above and run by the concurrent GC thread.
1484 }
1485 
1486 AdaptiveSizePolicy* ShenandoahHeap::size_policy() {
1487   Unimplemented();
1488   return NULL;
1489 
1490 }
1491 
1492 CollectorPolicy* ShenandoahHeap::collector_policy() const {
1493   return _shenandoah_policy;
1494 }
1495 
1497 HeapWord* ShenandoahHeap::block_start(const void* addr) const {
1498   Space* sp = heap_region_containing(addr);
1499   if (sp != NULL) {
1500     return sp->block_start(addr);
1501   }
1502   return NULL;
1503 }
1504 
1505 size_t ShenandoahHeap::block_size(const HeapWord* addr) const {
1506   Space* sp = heap_region_containing(addr);
1507   assert(sp != NULL, "block_size of address outside of heap");
1508   return sp->block_size(addr);
1509 }
1510 
1511 bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
1512   Space* sp = heap_region_containing(addr);
1513   return sp->block_is_obj(addr);
1514 }
1515 
1516 jlong ShenandoahHeap::millis_since_last_gc() {
1517   return 0;
1518 }
1519 
1520 void ShenandoahHeap::prepare_for_verify() {
1521   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
1522     ensure_parsability(false);
1523   }
1524 }
1525 
1526 void ShenandoahHeap::print_gc_threads_on(outputStream* st) const {
1527   workers()->print_worker_threads_on(st);
1528 }
1529 
1530 void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
1531   workers()->threads_do(tcl);
1532 }
1533 
1534 void ShenandoahHeap::print_tracing_info() const {
1535   if (log_is_enabled(Info, gc, stats)) {
1536     ResourceMark rm;
1537     outputStream* out = Log(gc, stats)::info_stream();
1538     _shenandoah_policy->print_tracing_info(out);
1539   }
1540 }
1541 
1542 class ShenandoahVerifyRootsClosure: public ExtendedOopClosure {
1543 private:
1544   ShenandoahHeap*  _heap;
1545   VerifyOption     _vo;
1546   bool             _failures;
1547 public:
1548   // _vo == UsePrevMarking -> use "prev" marking information,
1549   // _vo == UseNextMarking -> use "next" marking information,
1550   // _vo == UseMarkWord    -> use mark word from object header.
1551   ShenandoahVerifyRootsClosure(VerifyOption vo) :
1552     _heap(ShenandoahHeap::heap()),
1553     _vo(vo),
1554     _failures(false) { }
1555 
1556   bool failures() { return _failures; }
1557 
1558 private:
1559   template <class T>
1560   inline void do_oop_work(T* p) {
1561     oop obj = oopDesc::load_decode_heap_oop(p);
1562     if (! oopDesc::is_null(obj) && ! obj->is_oop()) {
1563       // Record the failure and report the broken root before the guarantee below fires.
1564       _failures = true;
1565       tty->print_cr("Root location "PTR_FORMAT" points to invalid oop "PTR_FORMAT,
1566                     p2i(p), p2i((void*) obj));
1567     }
1568     guarantee(obj->is_oop_or_null(), "is oop or null");
1569   }
1571 
1572 public:
1573   void do_oop(oop* p)       {
1574     do_oop_work(p);
1575   }
1576 
1577   void do_oop(narrowOop* p) {
1578     do_oop_work(p);
1579   }
1580 
1581 };
1582 
1583 class ShenandoahVerifyHeapClosure: public ObjectClosure {
1584 private:
1585   ShenandoahVerifyRootsClosure _rootsCl;
1586 public:
1587   ShenandoahVerifyHeapClosure(ShenandoahVerifyRootsClosure rc) :
1588     _rootsCl(rc) {};
1589 
1590   void do_object(oop p) {
1591     _rootsCl.do_oop(&p);
1592   }
1593 };
1594 
1595 class ShenandoahVerifyKlassClosure: public KlassClosure {
1596   OopClosure *_oop_closure;
1597  public:
1598   ShenandoahVerifyKlassClosure(OopClosure* cl) : _oop_closure(cl) {}
1599   void do_klass(Klass* k) {
1600     k->oops_do(_oop_closure);
1601   }
1602 };
1603 
1604 void ShenandoahHeap::verify(VerifyOption vo) {
1605   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
1606 
1607     ShenandoahVerifyRootsClosure rootsCl(vo);
1608 
1609     assert(Thread::current()->is_VM_thread(),
1610            "Expected to be executed serially by the VM thread at this point");
1611 
1612     roots_iterate(&rootsCl);
1613 
1614     bool failures = rootsCl.failures();
1615     log_trace(gc)("verify failures: %s", BOOL_TO_STR(failures));
1616 
1617     ShenandoahVerifyHeapClosure heapCl(rootsCl);
1618 
1619     object_iterate(&heapCl);
1620     // TODO: Implement rest of it.
1621   } else {
1622     tty->print("(SKIPPING roots, heapRegions, remset) ");
1623   }
1624 }

1625 size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
1626   return _free_regions->capacity();
1627 }
1628 
1629 class ShenandoahIterateObjectClosureRegionClosure: public ShenandoahHeapRegionClosure {
1630   ObjectClosure* _cl;
1631 public:
1632   ShenandoahIterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {}
1633   bool doHeapRegion(ShenandoahHeapRegion* r) {
1634     ShenandoahHeap::heap()->marked_object_iterate(r, _cl);
1635     return false;
1636   }
1637 };
1638 
1639 void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
1640   ShenandoahIterateObjectClosureRegionClosure blk(cl);
1641   heap_region_iterate(&blk, false, true);
1642 }
1643 
1644 class ShenandoahSafeObjectIterateAdjustPtrsClosure : public MetadataAwareOopClosure {
1645 private:
1646   ShenandoahHeap* _heap;
1647 
1648 public:
1649   ShenandoahSafeObjectIterateAdjustPtrsClosure() : _heap(ShenandoahHeap::heap()) {}
1650 
1651 private:
1652   template <class T>
1653   inline void do_oop_work(T* p) {
1654     T o = oopDesc::load_heap_oop(p);
1655     if (!oopDesc::is_null(o)) {
1656       oop obj = oopDesc::decode_heap_oop_not_null(o);
1657       oopDesc::encode_store_heap_oop(p, BrooksPointer::forwardee(obj));
1658     }
1659   }
1660 public:
1661   void do_oop(oop* p) {
1662     do_oop_work(p);
1663   }
1664   void do_oop(narrowOop* p) {
1665     do_oop_work(p);
1666   }
1667 };
1668 
1669 class ShenandoahSafeObjectIterateAndUpdate : public ObjectClosure {
1670 private:
1671   ObjectClosure* _cl;
1672 public:
1673   ShenandoahSafeObjectIterateAndUpdate(ObjectClosure *cl) : _cl(cl) {}
1674 
1675   virtual void do_object(oop obj) {
1676     assert (oopDesc::unsafe_equals(obj, BrooksPointer::forwardee(obj)),
1677             "avoid double-counting: only non-forwarded objects here");
1678 
1679     // Fix up the ptrs.
1680     ShenandoahSafeObjectIterateAdjustPtrsClosure adjust_ptrs;
1681     obj->oop_iterate(&adjust_ptrs);
1682 
1683     // Now hand the fixed-up object to the wrapped closure:
1684     _cl->do_object(obj);
1685   }
1686 };
1687 
1688 void ShenandoahHeap::safe_object_iterate(ObjectClosure* cl) {
1689   assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1690 
1691   // Safe iteration must present only objects with correct references.
1692   // That is why we skip dirty (collection set) regions, which may hold
1693   // stale copies, and fix up the pointers in the objects we do return.
1694 
1695   ShenandoahSafeObjectIterateAndUpdate safe_cl(cl);
1696   ShenandoahIterateObjectClosureRegionClosure blk(&safe_cl);
1697   heap_region_iterate(&blk,
1698                       /* skip_dirty_regions = */ true,
1699                       /* skip_humongous_continuations = */ true);
1700 
1701   _need_update_refs = false; // already updated the references
1702 }
1703 
1704 // Apply blk->doHeapRegion() on all committed regions in address order,
1705 // terminating the iteration early if doHeapRegion() returns true.
1706 void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk, bool skip_dirty_regions, bool skip_humongous_continuation) const {
1707   for (size_t i = 0; i < _num_regions; i++) {
1708     ShenandoahHeapRegion* current  = _ordered_regions->get(i);
1709     if (skip_humongous_continuation && current->is_humongous_continuation()) {
1710       continue;
1711     }
1712     if (skip_dirty_regions && in_collection_set(current)) {
1713       continue;
1714     }
1715     if (blk->doHeapRegion(current)) {
1716       return;
1717     }
1718   }
1719 }
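
     // Usage sketch (illustrative only, not part of the original sources):
     // a closure passed to heap_region_iterate() returns false to continue
     // and true to terminate the iteration early. For example, counting
     // non-humongous regions could look like:
     //
     //   class CountRegularRegionsClosure : public ShenandoahHeapRegionClosure {
     //   public:
     //     size_t _count;
     //     CountRegularRegionsClosure() : _count(0) {}
     //     bool doHeapRegion(ShenandoahHeapRegion* r) {
     //       if (! r->is_humongous()) _count++;
     //       return false; // keep iterating
     //     }
     //   };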
1720 
1721 class ClearLivenessClosure : public ShenandoahHeapRegionClosure {
1722   ShenandoahHeap* sh;
1723 public:
1724   ClearLivenessClosure(ShenandoahHeap* heap) : sh(heap) { }
1725 
1726   bool doHeapRegion(ShenandoahHeapRegion* r) {
1727     r->clear_live_data();
1728     sh->set_next_top_at_mark_start(r->bottom(), r->top());
1729     return false;
1730   }
1731 };
1732 
1733 
1734 void ShenandoahHeap::start_concurrent_marking() {
1735 
1736   shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::accumulate_stats);
1737   accumulate_statistics_all_tlabs();
1738   shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::accumulate_stats);
1739 
1740   set_concurrent_mark_in_progress(true);
1741   // We need to reset all TLABs because we'd lose marks on all objects allocated in them.
1742   if (UseTLAB) {
1743     shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::make_parsable);
1744     ensure_parsability(true);
1745     shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::make_parsable);
1746   }
1747 
1748   _shenandoah_policy->record_bytes_allocated(_bytes_allocated_since_cm);
1749   _used_start_gc = used();
1750 
1751 #ifdef ASSERT
1752   if (ShenandoahDumpHeapBeforeConcurrentMark) {
1753     ensure_parsability(false);
1754     print_all_refs("pre-mark");
1755   }
1756 #endif
1757 
1758   shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::clear_liveness);
1759   ClearLivenessClosure clc(this);
1760   heap_region_iterate(&clc);
1761   shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::clear_liveness);
1766 
1767   // Make above changes visible to worker threads
1768   OrderAccess::fence();
1769 
1770   shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::scan_roots);
1771   concurrentMark()->init_mark_roots();
1772   shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::scan_roots);
1775 }
1776 
1777 class VerifyAfterEvacuationClosure : public ExtendedOopClosure {
1778 
1779   ShenandoahHeap* _sh;
1780 
1781 public:
1782   VerifyAfterEvacuationClosure() : _sh ( ShenandoahHeap::heap() ) {}
1783 
1784   template<class T> void do_oop_nv(T* p) {
1785     T heap_oop = oopDesc::load_heap_oop(p);
1786     if (!oopDesc::is_null(heap_oop)) {
1787       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
1788       guarantee(_sh->in_collection_set(obj) == (! oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj))),
1789                 "forwarded objects can only exist in dirty (from-space) regions is_dirty: %s, is_forwarded: %s obj-klass: %s, marked: %s",
1790                 BOOL_TO_STR(_sh->in_collection_set(obj)),
1791                 BOOL_TO_STR(! oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj))),
1792                 obj->klass()->external_name(),
1793                 BOOL_TO_STR(_sh->is_marked_complete(obj))
1794                 );
1795       obj = oopDesc::bs()->read_barrier(obj);
1796       guarantee(! _sh->in_collection_set(obj), "forwarded oops must not point to dirty regions");
1797       guarantee(obj->is_oop(), "is_oop");
1798       guarantee(Metaspace::contains(obj->klass()), "klass pointer must go to metaspace");
1799     }
1800   }
1801 
1802   void do_oop(oop* p)       { do_oop_nv(p); }
1803   void do_oop(narrowOop* p) { do_oop_nv(p); }
1804 
1805 };
1806 
1807 void ShenandoahHeap::verify_heap_after_evacuation() {
1808 
1809   verify_heap_size_consistency();
1810 
1811   ensure_parsability(false);
1812 
1813   VerifyAfterEvacuationClosure cl;
1814   roots_iterate(&cl);
1815 
1816   ObjectToOopClosure objs(&cl);
1817   object_iterate(&objs);
1818 
1819 }
1820 
1821 class VerifyRegionsAfterUpdateRefsClosure : public ShenandoahHeapRegionClosure {
1822 public:
1823   bool doHeapRegion(ShenandoahHeapRegion* r) {
1824     assert(! ShenandoahHeap::heap()->in_collection_set(r), "no region must be in collection set");
1825     return false;
1826   }
1827 };
1828 
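     // Shenandoah keeps two mark bitmaps: the "next" bitmap is the one being
     // built by the marking cycle in flight, while the "complete" bitmap holds
     // the results of the last finished cycle. Completing a cycle publishes
     // its results by letting the two (and their associated top-at-mark-start
     // pointers) trade places.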
1829 void ShenandoahHeap::swap_mark_bitmaps() {
1830   // Swap bitmaps.
1831   CMBitMap* tmp1 = _complete_mark_bit_map;
1832   _complete_mark_bit_map = _next_mark_bit_map;
1833   _next_mark_bit_map = tmp1;
1834 
1835   // Swap top-at-mark-start pointers
1836   HeapWord** tmp2 = _complete_top_at_mark_starts;
1837   _complete_top_at_mark_starts = _next_top_at_mark_starts;
1838   _next_top_at_mark_starts = tmp2;
1839 
1840   HeapWord** tmp3 = _complete_top_at_mark_starts_base;
1841   _complete_top_at_mark_starts_base = _next_top_at_mark_starts_base;
1842   _next_top_at_mark_starts_base = tmp3;
1843 }
1844 
1845 void ShenandoahHeap::stop_concurrent_marking() {
1846   assert(concurrent_mark_in_progress(), "How else could we get here?");
1847   if (! cancelled_concgc()) {
1848     // Marking completed without cancellation: swap the mark bitmaps to publish
1849     // the new marking as "complete", and clear the update-refs flag.
1850     set_need_update_refs(false);
1851     swap_mark_bitmaps();
1852   }
1853   set_concurrent_mark_in_progress(false);
1854 
1855   if (log_is_enabled(Trace, gc, region)) {
1856     ResourceMark rm;
1857     outputStream* out = Log(gc, region)::trace_stream();
1858     print_heap_regions(out);
1859   }
1860 
1861 }
1862 
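     // Toggling the mark-in-progress flag also flips the SATB
     // (snapshot-at-the-beginning) queues: mutator threads must record the
     // previous values of overwritten references exactly while concurrent
     // marking is running.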
1863 void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
1864   _concurrent_mark_in_progress = in_progress ? 1 : 0;
1865   JavaThread::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
1866 }
1867 
1868 void ShenandoahHeap::set_evacuation_in_progress_concurrently(bool in_progress) {
1869   // Note: it is important to first release the _evacuation_in_progress flag here,
1870   // so that Java threads can get out of oom_during_evacuation() and reach a safepoint,
1871   // in case a VM task is pending.
1872   set_evacuation_in_progress(in_progress);
1873   MutexLocker mu(Threads_lock);
1874   JavaThread::set_evacuation_in_progress_all_threads(in_progress);
1875 }
1876 
1877 void ShenandoahHeap::set_evacuation_in_progress_at_safepoint(bool in_progress) {
1878   assert(SafepointSynchronize::is_at_safepoint(), "Only call this at safepoint");
1879   set_evacuation_in_progress(in_progress);
1880   JavaThread::set_evacuation_in_progress_all_threads(in_progress);
1881 }
1882 
1883 void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
1884   _evacuation_in_progress = in_progress ? 1 : 0;
1885   OrderAccess::fence();
1886 }
1887 
1888 void ShenandoahHeap::verify_copy(oop p, oop c) {
1889   assert(! oopDesc::unsafe_equals(p, oopDesc::bs()->read_barrier(p)), "forwarded correctly");
1890   assert(oopDesc::unsafe_equals(oopDesc::bs()->read_barrier(p), c), "verify pointer is correct");
1891   if (p->klass() != c->klass()) {
1892     print_heap_regions();
1893   }
1894   assert(p->klass() == c->klass(), "verify class; p-size: "INT32_FORMAT", c-size: "INT32_FORMAT, p->size(), c->size());
1895   assert(p->size() == c->size(), "verify size");
1896   // Object may have been locked between copy and verification
1897   //    assert(p->mark() == c->mark(), "verify mark");
1898   assert(oopDesc::unsafe_equals(c, oopDesc::bs()->read_barrier(c)), "verify only forwarded once");
1899 }
1900 
1901 void ShenandoahHeap::oom_during_evacuation() {
1902   log_develop_trace(gc)("Out of memory during evacuation; cancelling evacuation and scheduling a full GC, thread %d",
1903                         Thread::current()->osthread()->thread_id());
1904 
1905   // We ran out of memory during evacuation. Cancel evacuation, and schedule a full-GC.
1906   collector_policy()->set_should_clear_all_soft_refs(true);
1907   concurrent_thread()->try_set_full_gc();
1908   cancel_concgc(_oom_evacuation);
1909 
1910   if ((! Thread::current()->is_GC_task_thread()) && (! Thread::current()->is_ConcurrentGC_thread())) {
1911     assert(! Threads_lock->owned_by_self(), "must not hold Threads_lock here");
1912     log_warning(gc)("OOM during evacuation. Let the Java thread wait until evacuation finishes.");
1913     while (_evacuation_in_progress) { // wait.
1914       Thread::current()->_ParkEvent->park(1);
1915     }
1916   }
1917 
1918 }
1919 
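     // Layout sketch (assuming a one-word forwarding pointer, per
     // BrooksPointer::word_size()):
     //
     //   obj                 result
     //   v                   v
     //   +-------------------+------------------------------ ...
     //   | forwarding word   | object header and fields
     //   +-------------------+------------------------------ ...
     //
     // The extra word is reserved in front of every object (see
     // oop_extra_words() below); here we skip over it and initialize it so
     // that the new object initially forwards to itself.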
1920 HeapWord* ShenandoahHeap::tlab_post_allocation_setup(HeapWord* obj) {
1921   // Initialize Brooks pointer for the next object
1922   HeapWord* result = obj + BrooksPointer::word_size();
1923   BrooksPointer::initialize(oop(result));
1924   return result;
1925 }
1926 
1927 uint ShenandoahHeap::oop_extra_words() {
1928   return BrooksPointer::word_size();
1929 }
1930 
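     // Region i starts at
     //   _first_region_bottom + i * (RegionSizeBytes / HeapWordSize)
     // so, once the backing storage has been expanded (see
     // ensure_new_regions() below), new regions can be laid out with plain
     // address arithmetic.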
1931 void ShenandoahHeap::grow_heap_by(size_t num_regions) {
1932   size_t base = _num_regions;
1933   ensure_new_regions(num_regions);
1934   for (size_t i = 0; i < num_regions; i++) {
1935     size_t new_region_index = i + base;
1936     HeapWord* start = _first_region_bottom + (ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize) * new_region_index;
1937     ShenandoahHeapRegion* new_region = new ShenandoahHeapRegion(this, start, ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize, new_region_index);
1938 
1939     if (log_is_enabled(Trace, gc, region)) {
1940       ResourceMark rm;
1941       outputStream* out = Log(gc, region)::trace_stream();
1942       out->print_cr("allocating new region at index: "SIZE_FORMAT, new_region_index);
1943       new_region->print_on(out);
1944     }
1945 
1946     assert(_ordered_regions->active_regions() == new_region->region_number(), "must match");
1947     _ordered_regions->add_region(new_region);
1948     _in_cset_fast_test_base[new_region_index] = false; // Not in cset
1949     _next_top_at_mark_starts_base[new_region_index] = new_region->bottom();
1950     _complete_top_at_mark_starts_base[new_region_index] = new_region->bottom();
1951 
1952     _free_regions->add_region(new_region);
1953   }
1954 }
1955 
1956 void ShenandoahHeap::ensure_new_regions(size_t new_regions) {
1957 
1958   size_t num_regions = _num_regions;
1959   size_t new_num_regions = num_regions + new_regions;
1960   assert(new_num_regions <= _max_regions, "we checked this earlier");
1961 
1962   size_t expand_size = new_regions * ShenandoahHeapRegion::RegionSizeBytes;
1963   log_trace(gc, region)("expanding storage by "SIZE_FORMAT_HEX" bytes, for "SIZE_FORMAT" new regions", expand_size, new_regions);
1964   bool success = _storage.expand_by(expand_size, ShenandoahAlwaysPreTouch);
1965   assert(success, "should always be able to expand by requested size");
1966 
1967   _num_regions = new_num_regions;
1968 
1969 }
1970 
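     // Liveness predicate for reference processing: resolve the oop through
     // its forwarding pointer first, then consult the "next" marking bitmap,
     // so that from-space and to-space copies of an object answer alike.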
1971 ShenandoahForwardedIsAliveClosure::ShenandoahForwardedIsAliveClosure() :
1972   _heap(ShenandoahHeap::heap_no_check()) {
1973 }
1974 
1975 void ShenandoahForwardedIsAliveClosure::init(ShenandoahHeap* heap) {
1976   _heap = heap;
1977 }
1978 
1979 bool ShenandoahForwardedIsAliveClosure::do_object_b(oop obj) {
1980 
1981   assert(_heap != NULL, "sanity");
1982   obj = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
1983 #ifdef ASSERT
1984   if (_heap->concurrent_mark_in_progress()) {
1985     assert(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)), "only query to-space");
1986   }
1987 #endif
1988   assert(!oopDesc::is_null(obj), "null");
1989   return _heap->is_marked_next(obj);
1990 }
1991 
1992 void ShenandoahHeap::ref_processing_init() {
1993   MemRegion mr = reserved_region();
1994 
1995   isAlive.init(ShenandoahHeap::heap());
1996   assert(_max_workers > 0, "Sanity");
1997 
1998   _ref_processor =
1999     new ReferenceProcessor(mr,                      // span
2000                            ParallelRefProcEnabled,  // mt processing
2001                            _max_workers,            // degree of mt processing
2002                            true,                    // mt discovery
2003                            _max_workers,            // degree of mt discovery
2004                            false,                   // reference discovery is not atomic
2005                            &isAlive);
2011 }
2012 
2013 #ifdef ASSERT
2014 void ShenandoahHeap::set_from_region_protection(bool protect) {
2015   for (size_t i = 0; i < _num_regions; i++) {
2016     ShenandoahHeapRegion* region = _ordered_regions->get(i);
2017     if (region != NULL && in_collection_set(region)) {
2018       if (protect) {
2019         region->memProtectionOn();
2020       } else {
2021         region->memProtectionOff();
2022       }
2023     }
2024   }
2025 }
2026 #endif
2027 
2028 size_t ShenandoahHeap::num_regions() {
2029   return _num_regions;
2030 }
2031 
2032 size_t ShenandoahHeap::max_regions() {
2033   return _max_regions;
2034 }
2035 
2036 GCTracer* ShenandoahHeap::tracer() {
2037   return shenandoahPolicy()->tracer();
2038 }
2039 
2040 size_t ShenandoahHeap::tlab_used(Thread* thread) const {
2041   return _free_regions->used();
2042 }
2043 
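     // Cancellation is racy by design: try_cancel_concgc() is expected to
     // succeed for only one caller, so only the thread that actually cancels
     // logs the cause and reports the cancellation to the policy.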
2044 void ShenandoahHeap::cancel_concgc(GCCause::Cause cause) {
2045   if (try_cancel_concgc()) {
2046     log_info(gc)("Cancelling concurrent GC: %s", GCCause::to_string(cause));
2047     _shenandoah_policy->report_concgc_cancelled();
2048   }
2049 }
2050 
2051 void ShenandoahHeap::cancel_concgc(ShenandoahCancelCause cause) {
2052   if (try_cancel_concgc()) {
2053     log_info(gc)("Cancelling concurrent GC: %s", cancel_cause_to_string(cause));
2054     _shenandoah_policy->report_concgc_cancelled();
2055   }
2056 }
2057 
2058 const char* ShenandoahHeap::cancel_cause_to_string(ShenandoahCancelCause cause) {
2059   switch (cause) {
2060     case _oom_evacuation:
2061       return "Out of memory for evacuation";
2062     case _vm_stop:
2063       return "Stopping VM";
2064     default:
2065       return "Unknown";
2066   }
2067 }
2068 
2069 void ShenandoahHeap::clear_cancelled_concgc() {
2070   set_cancelled_concgc(false);
2071 }
2072 
2073 uint ShenandoahHeap::max_workers() {
2074   return _max_workers;
2075 }
2076 
2077 void ShenandoahHeap::stop() {
2078   // The shutdown sequence should be able to complete even when a GC cycle is running.
2079 
2080   // Step 1. Notify control thread that we are in shutdown.
2081   // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown.
2082   // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
2083   _concurrent_gc_thread->prepare_for_graceful_shutdown();
2084 
2085   // Step 2. Notify GC workers that we are cancelling GC.
2086   cancel_concgc(_vm_stop);
2087 
2088   // Step 3. Wait until GC worker exits normally.
2089   _concurrent_gc_thread->stop();
2090 }
2091 
2092 void ShenandoahHeap::unlink_string_and_symbol_table(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols) {
2093 
2094   StringSymbolTableUnlinkTask shenandoah_unlink_task(is_alive, process_strings, process_symbols);
2095   workers()->run_task(&shenandoah_unlink_task);
2096 
2097   //  if (G1StringDedup::is_enabled()) {
2098   //    G1StringDedup::unlink(is_alive);
2099   //  }
2100 }
2101 
2102 void ShenandoahHeap::set_need_update_refs(bool need_update_refs) {
2103   _need_update_refs = need_update_refs;
2104 }
2105 
2106 // FIXME: This should live in ShenandoahHeapRegionSet.
2107 ShenandoahHeapRegion* ShenandoahHeap::next_compaction_region(const ShenandoahHeapRegion* r) {
2108   size_t region_idx = r->region_number() + 1;
2109   ShenandoahHeapRegion* next = _ordered_regions->get(region_idx);
2110   guarantee(next->region_number() == region_idx, "region number must match");
2111   while (next->is_humongous()) {
2112     region_idx = next->region_number() + 1;
2113     next = _ordered_regions->get(region_idx);
2114     guarantee(next->region_number() == region_idx, "region number must match");
2115   }
2116   return next;
2117 }
2118 
2119 void ShenandoahHeap::set_region_in_collection_set(size_t region_index, bool b) {
2120   _in_cset_fast_test_base[region_index] = b;
2121 }
2122 
2123 ShenandoahMonitoringSupport* ShenandoahHeap::monitoring_support() {
2124   return _monitoring_support;
2125 }
2126 
2127 CMBitMap* ShenandoahHeap::complete_mark_bit_map() {
2128   return _complete_mark_bit_map;
2129 }
2130 
2131 CMBitMap* ShenandoahHeap::next_mark_bit_map() {
2132   return _next_mark_bit_map;
2133 }
2134 
2135 void ShenandoahHeap::add_free_region(ShenandoahHeapRegion* r) {
2136   _free_regions->add_region(r);
2137 }
2138 
2139 void ShenandoahHeap::clear_free_regions() {
2140   _free_regions->clear();
2141 }
2142 
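     // Raw-address accessors: these expose the collection-set membership
     // table and the cancellation flag by address, presumably so that
     // generated (JIT) code can test them with a single load. See also
     // set_region_in_collection_set() and clear_cset_fast_test().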
2143 address ShenandoahHeap::in_cset_fast_test_addr() {
2144   return (address) (ShenandoahHeap::heap()->_in_cset_fast_test);
2145 }
2146 
2147 address ShenandoahHeap::cancelled_concgc_addr() {
2148   return (address) &(ShenandoahHeap::heap()->_cancelled_concgc);
2149 }
2150 
2151 void ShenandoahHeap::clear_cset_fast_test() {
2152   assert(_in_cset_fast_test_base != NULL, "sanity");
2153   memset(_in_cset_fast_test_base, false,
2154          _in_cset_fast_test_length * sizeof(bool));
2155 }
2156 
2157 size_t ShenandoahHeap::conservative_max_heap_alignment() {
2158   return HeapRegionBounds::max_size();
2159 }
2160 
2161 size_t ShenandoahHeap::bytes_allocated_since_cm() {
2162   return _bytes_allocated_since_cm;
2163 }
2164 
2165 void ShenandoahHeap::set_bytes_allocated_since_cm(size_t bytes) {
2166   _bytes_allocated_since_cm = bytes;
2167 }
2168 
2169 size_t ShenandoahHeap::max_allocated_gc() {
2170   return _max_allocated_gc;
2171 }
2172 
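     // Top-at-mark-start (TAMS) accessors. Regions are RegionSizeBytes-aligned,
     // so shifting a region's base address right by RegionSizeShift yields an
     // array index. Note that the arrays used here are the biased counterparts
     // of the *_base arrays indexed by region number in grow_heap_by().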
2173 void ShenandoahHeap::set_next_top_at_mark_start(HeapWord* region_base, HeapWord* addr) {
2174   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::RegionSizeShift;
2175   _next_top_at_mark_starts[index] = addr;
2176 }
2177 
2178 HeapWord* ShenandoahHeap::next_top_at_mark_start(HeapWord* region_base) {
2179   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::RegionSizeShift;
2180   return _next_top_at_mark_starts[index];
2181 }
2182 
2183 void ShenandoahHeap::set_complete_top_at_mark_start(HeapWord* region_base, HeapWord* addr) {
2184   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::RegionSizeShift;
2185   _complete_top_at_mark_starts[index] = addr;
2186 }
2187 
2188 HeapWord* ShenandoahHeap::complete_top_at_mark_start(HeapWord* region_base) {
2189   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::RegionSizeShift;
2190   return _complete_top_at_mark_starts[index];
2191 }
2192 
2193 void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
2194   _full_gc_in_progress = in_progress;
2195 }
2196 
2197 bool ShenandoahHeap::is_full_gc_in_progress() const {
2198   return _full_gc_in_progress;
2199 }
2200 
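     // Closure for register_nmethod(): passes every oop embedded in an nmethod
     // through the write barrier, so compiled code never holds on to a
     // from-space reference.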
2201 class NMethodOopInitializer : public OopClosure {
2202 private:
2203   ShenandoahHeap* _heap;
2204 public:
2205   NMethodOopInitializer() : _heap(ShenandoahHeap::heap()) {
2206   }
2207 
2208 private:
2209   template <class T>
2210   inline void do_oop_work(T* p) {
2211     T o = oopDesc::load_heap_oop(p);
2212     if (! oopDesc::is_null(o)) {
2213       oop obj1 = oopDesc::decode_heap_oop_not_null(o);
2214       oop obj2 = oopDesc::bs()->write_barrier(obj1);
2215       if (! oopDesc::unsafe_equals(obj1, obj2)) {
2216         oopDesc::encode_store_heap_oop(p, obj2);
2217       }
2218     }
2219   }
2220 
2221 public:
2222   void do_oop(oop* o) {
2223     do_oop_work(o);
2224   }
2225   void do_oop(narrowOop* o) {
2226     do_oop_work(o);
2227   }
2228 };
2229 
2230 void ShenandoahHeap::register_nmethod(nmethod* nm) {
2231   NMethodOopInitializer init;
2232   nm->oops_do(&init);
2233   nm->fix_oop_relocations();
2234 }
2235 
2236 void ShenandoahHeap::unregister_nmethod(nmethod* nm) {
2237 }
2238 
2239 void ShenandoahHeap::pin_object(oop o) {
2240   heap_region_containing(o)->pin();
2241 }
2242 
2243 void ShenandoahHeap::unpin_object(oop o) {
2244   heap_region_containing(o)->unpin();
2245 }
2246 
2247 
2248 GCTimer* ShenandoahHeap::gc_timer() const {
2249   return _gc_timer;
2250 }
2251 
2252 class RecordAllRefsOopClosure: public ExtendedOopClosure {
2253 private:
2254   int _x;
2255   int *_matrix;
2256   int _num_regions;
2257   oop _p;
2258 
2259 public:
2260   RecordAllRefsOopClosure(int *matrix, int x, size_t num_regions, oop p) :
2261     _x(x), _matrix(matrix), _num_regions((int) num_regions), _p(p) {}
2262 
2263   template <class T>
2264   void do_oop_work(T* p) {
2265     oop o = oopDesc::load_decode_heap_oop(p);
2266     if (o != NULL) {
2267       if (ShenandoahHeap::heap()->is_in(o) && o->is_oop() ) {
2268         int y = ShenandoahHeap::heap()->heap_region_containing(o)->region_number();
2269         _matrix[_x * _num_regions + y]++;
2270       }
2271     }
2272   }
2273   void do_oop(oop* p) {
2274     do_oop_work(p);
2275   }
2276 
2277   void do_oop(narrowOop* p) {
2278     do_oop_work(p);
2279   }
2280 
2281 };
2282 
2283 class RecordAllRefsObjectClosure : public ObjectClosure {
2284   int *_matrix;
2285   size_t _num_regions;
2286 
2287 public:
2288   RecordAllRefsObjectClosure(int *matrix, size_t num_regions) :
2289     _matrix(matrix), _num_regions(num_regions) {}
2290 
2291   void do_object(oop p) {
2292     if (ShenandoahHeap::heap()->is_in(p) && ShenandoahHeap::heap()->is_marked_next(p)  && p->is_oop()) {
2293       int x = ShenandoahHeap::heap()->heap_region_containing(p)->region_number();
2294       RecordAllRefsOopClosure cl(_matrix, x, _num_regions, p);
2295       p->oop_iterate(&cl);
2296     }
2297   }
2298 };
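
     // Connection matrix layout: connections[from * num + to] counts references
     // from objects in region 'from' to objects in region 'to'. Sketch of the
     // index layout for num == 3:
     //
     //        to:   0  1  2
     //   from 0  [  0  1  2 ]
     //   from 1  [  3  4  5 ]
     //   from 2  [  6  7  8 ]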
2299 void ShenandoahHeap::calculate_matrix(int* connections) {
2300   log_develop_trace(gc)("calculating matrix");
2301   ensure_parsability(false);
2302   int num = (int) num_regions();
2303 
2304   for (int i = 0; i < num; i++) {
2305     for (int j = 0; j < num; j++) {
2306       connections[i * num + j] = 0;
2307     }
2308   }
2309 
2310   RecordAllRefsOopClosure cl(connections, 0, num, NULL);
2311   roots_iterate(&cl);
2312 
2313   RecordAllRefsObjectClosure cl2(connections, num);
2314   object_iterate(&cl2);
2315 
2316 }
2317 
2318 void ShenandoahHeap::print_matrix(int* connections) {
2319   int num = num_regions();
2320   int cs_regions = 0;
2321   int referenced = 0;
2322 
2323   for (int i = 0; i < num; i++) {
2326     int numReferencedRegions = 0;
2327     int numReferencedByRegions = 0;
2328 
2329     for (int j = 0; j < num; j++) {
2330       if (connections[i * num + j] > 0)
2331         numReferencedRegions++;
2332 
2333       if (connections[j * num + i] > 0)
2334         numReferencedByRegions++;
2335     }
2336     // Count each region once, after its full row/column has been scanned.
2337     cs_regions++;
2338     referenced += numReferencedByRegions;
2339 
2340     if (ShenandoahHeap::heap()->regions()->get(i)->has_live()) {
2341       tty->print("Region %d is referenced by %d regions {",
2342                  i, numReferencedByRegions);
2343       int col_count = 0;
2344       for (int j = 0; j < num; j++) {
2345         int foo = connections[j * num + i];
2346         if (foo > 0) {
2347           col_count++;
2348           if ((col_count % 10) == 0)
2349             tty->print("\n");
2350           tty->print("%d(%d), ", j,foo);
2351         }
2352       }
2353       tty->print("} \n");
2354     }
2355   }
2356 
2357   double avg = (double)referenced / (double) cs_regions;
2358   tty->print("Average Number of regions scanned / region = %lf\n", avg);
2359 }
2360 
2361 class ShenandoahCountGarbageClosure : public ShenandoahHeapRegionClosure {
2362 private:
2363   size_t _garbage;
2364 public:
2365   ShenandoahCountGarbageClosure() : _garbage(0) {
2366   }
2367 
2368   bool doHeapRegion(ShenandoahHeapRegion* r) {
2369     if (! r->is_humongous() && ! r->is_pinned() && ! r->in_collection_set()) {
2370       _garbage += r->garbage();
2371     }
2372     return false;
2373   }
2374 
2375   size_t garbage() {
2376     return _garbage;
2377   }
2378 };
2379 
2380 size_t ShenandoahHeap::garbage() {
2381   ShenandoahCountGarbageClosure cl;
2382   heap_region_iterate(&cl);
2383   return cl.garbage();
2384 }
2385 
2386 #ifdef ASSERT
2387 void ShenandoahHeap::assert_heaplock_owned_by_current_thread() {
2388   assert(_heap_lock == locked, "must be locked");
2389   assert(_heap_lock_owner == Thread::current(), "must be owned by current thread");
2390 }
2391 
2392 void ShenandoahHeap::assert_heaplock_or_safepoint() {
2393   Thread* thr = Thread::current();
2394   assert((_heap_lock == locked && _heap_lock_owner == thr) ||
2395          (SafepointSynchronize::is_at_safepoint() && thr->is_VM_thread()),
2396   "must own heap lock or by VM thread at safepoint");
2397 }
2398 
2399 #endif