/*
 * Copyright (c) 2013, 2015, Red Hat, Inc. and/or its affiliates.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.hpp"

#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/parallelCleaning.hpp"

#include "gc/shenandoah/brooksPointer.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc/shenandoah/shenandoahConcurrentThread.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahHumongous.hpp"
#include "gc/shenandoah/shenandoahMarkCompact.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahPartialGC.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.hpp"
#include "gc/shenandoah/vm_operations_shenandoah.hpp"

#include "runtime/vmThread.hpp"
#include "services/mallocTracker.hpp"

SCMUpdateRefsClosure::SCMUpdateRefsClosure() : _heap(ShenandoahHeap::heap()) {}

#ifdef ASSERT
template <class T>
void AssertToSpaceClosure::do_oop_nv(T* p) {
  T o = oopDesc::load_heap_oop(p);
  if (! oopDesc::is_null(o)) {
    oop obj = oopDesc::decode_heap_oop_not_null(o);
    assert(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)),
           "need to-space object here obj: "PTR_FORMAT", rb(obj): "PTR_FORMAT", p: "PTR_FORMAT,
           p2i(obj), p2i(ShenandoahBarrierSet::resolve_oop_static_not_null(obj)), p2i(p));
  }
}

void AssertToSpaceClosure::do_oop(narrowOop* p) { do_oop_nv(p); }
void AssertToSpaceClosure::do_oop(oop* p)       { do_oop_nv(p); }
#endif

const char* ShenandoahHeap::name() const {
  return "Shenandoah";
}

void ShenandoahHeap::print_heap_locations(HeapWord* start, HeapWord* end) {
  HeapWord* cur = NULL;
  for (cur = start; cur < end; cur++) {
    tty->print_cr(PTR_FORMAT" : "PTR_FORMAT, p2i(cur), p2i(*((HeapWord**) cur)));
  }
}

class PrintHeapRegionsClosure : public ShenandoahHeapRegionClosure {
private:
  outputStream* _st;
public:
  PrintHeapRegionsClosure() : _st(tty) {}
  PrintHeapRegionsClosure(outputStream* st) : _st(st) {}

  bool doHeapRegion(ShenandoahHeapRegion* r) {
    r->print_on(_st);
    return false;
  }
};

class ShenandoahPretouchTask : public AbstractGangTask {
private:
  ShenandoahHeapRegionSet* _regions;
  const size_t _bitmap_size;
  const size_t _page_size;
  char* _bitmap0_base;
  char* _bitmap1_base;
public:
  ShenandoahPretouchTask(ShenandoahHeapRegionSet* regions,
                         char* bitmap0_base, char* bitmap1_base, size_t bitmap_size,
                         size_t page_size) :
    AbstractGangTask("Shenandoah PreTouch",
                     Universe::is_fully_initialized() ? GCId::current_raw() :
                                                        // During VM initialization there is
                                                        // no GC cycle that this task can be
                                                        // associated with.
                                                        GCId::undefined()),
    _regions(regions),
    _bitmap_size(bitmap_size),
    _page_size(page_size),
    _bitmap0_base(bitmap0_base),
    _bitmap1_base(bitmap1_base) {
    _regions->clear_current_index();
  }

  virtual void work(uint worker_id) {
    ShenandoahHeapRegion* r = _regions->claim_next();
    while (r != NULL) {
      log_trace(gc, heap)("Pretouch region " SIZE_FORMAT ": " PTR_FORMAT " -> " PTR_FORMAT,
                          r->region_number(), p2i(r->bottom()), p2i(r->end()));
      os::pretouch_memory(r->bottom(), r->end(), _page_size);

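      // Each region owns a fixed slice of the mark bitmap: one bitmap byte covers
      // heap_map_factor() heap bytes, so region N's slice spans the byte range
      // [N, N+1) * RegionSizeBytes / heap_map_factor().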
      size_t start = r->region_number()       * ShenandoahHeapRegion::RegionSizeBytes / CMBitMap::heap_map_factor();
      size_t end   = (r->region_number() + 1) * ShenandoahHeapRegion::RegionSizeBytes / CMBitMap::heap_map_factor();
      assert (end <= _bitmap_size, "end is sane: " SIZE_FORMAT " <= " SIZE_FORMAT, end, _bitmap_size);

      log_trace(gc, heap)("Pretouch bitmap under region " SIZE_FORMAT ": " PTR_FORMAT " -> " PTR_FORMAT,
                          r->region_number(), p2i(_bitmap0_base + start), p2i(_bitmap0_base + end));
      os::pretouch_memory(_bitmap0_base + start, _bitmap0_base + end, _page_size);

      log_trace(gc, heap)("Pretouch bitmap under region " SIZE_FORMAT ": " PTR_FORMAT " -> " PTR_FORMAT,
                          r->region_number(), p2i(_bitmap1_base + start), p2i(_bitmap1_base + end));
      os::pretouch_memory(_bitmap1_base + start, _bitmap1_base + end, _page_size);

      r = _regions->claim_next();
    }
  }
};

jint ShenandoahHeap::initialize() {
  CollectedHeap::pre_initialize();

  size_t init_byte_size = collector_policy()->initial_heap_byte_size();
  size_t max_byte_size = collector_policy()->max_heap_byte_size();

  Universe::check_alignment(max_byte_size,
                            ShenandoahHeapRegion::RegionSizeBytes,
                            "shenandoah heap");
  Universe::check_alignment(init_byte_size,
                            ShenandoahHeapRegion::RegionSizeBytes,
                            "shenandoah heap");

  ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
                                                 Arguments::conservative_max_heap_alignment());
  initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*) (heap_rs.base() + heap_rs.size()));

  set_barrier_set(new ShenandoahBarrierSet(this));
  ReservedSpace pgc_rs = heap_rs.first_part(max_byte_size);
  _storage.initialize(pgc_rs, init_byte_size);

  _num_regions = init_byte_size / ShenandoahHeapRegion::RegionSizeBytes;
  _max_regions = max_byte_size / ShenandoahHeapRegion::RegionSizeBytes;
  _initialSize = _num_regions * ShenandoahHeapRegion::RegionSizeBytes;
  size_t regionSizeWords = ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize;
  assert(init_byte_size == _initialSize, "tautology");
  _ordered_regions = new ShenandoahHeapRegionSet(_max_regions);
  _collection_set = new ShenandoahCollectionSet(_max_regions);
  _free_regions = new ShenandoahFreeSet(_max_regions);

  // Initialize fast collection set test structure.
  _in_cset_fast_test_length = _max_regions;
  _in_cset_fast_test_base =
                   NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length, mtGC);
  _in_cset_fast_test = _in_cset_fast_test_base -
               ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::RegionSizeShift);
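  // Biasing the array base by (heap_start >> RegionSizeShift) lets the fast path
  // index it directly with (addr >> RegionSizeShift), avoiding a subtraction of
  // the heap base on every query. The top-at-mark-start arrays below do the same.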

  _next_top_at_mark_starts_base =
                   NEW_C_HEAP_ARRAY(HeapWord*, _max_regions, mtGC);
  _next_top_at_mark_starts = _next_top_at_mark_starts_base -
               ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::RegionSizeShift);

  _complete_top_at_mark_starts_base =
                   NEW_C_HEAP_ARRAY(HeapWord*, _max_regions, mtGC);
  _complete_top_at_mark_starts = _complete_top_at_mark_starts_base -
               ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::RegionSizeShift);

  size_t i = 0;
  for (i = 0; i < _num_regions; i++) {
    _in_cset_fast_test_base[i] = false; // Not in cset
    HeapWord* bottom = (HeapWord*) pgc_rs.base() + regionSizeWords * i;
    _complete_top_at_mark_starts_base[i] = bottom;
    _next_top_at_mark_starts_base[i] = bottom;
  }

  {
    ShenandoahHeapLock lock(this);
    for (i = 0; i < _num_regions; i++) {
      ShenandoahHeapRegion* current = new ShenandoahHeapRegion(this, (HeapWord*) pgc_rs.base() +
                                                               regionSizeWords * i, regionSizeWords, i);
      _free_regions->add_region(current);
      _ordered_regions->add_region(current);
    }
  }
  assert(((size_t) _ordered_regions->active_regions()) == _num_regions, "");
  _first_region = _ordered_regions->get(0);
  _first_region_bottom = _first_region->bottom();
  assert((((size_t) _first_region_bottom) &
          (ShenandoahHeapRegion::RegionSizeBytes - 1)) == 0,
         "misaligned heap: "PTR_FORMAT, p2i(_first_region_bottom));

  _numAllocs = 0;

  if (log_is_enabled(Trace, gc, region)) {
    ResourceMark rm;
    outputStream* out = Log(gc, region)::trace_stream();
    log_trace(gc, region)("All Regions");
    _ordered_regions->print(out);
    log_trace(gc, region)("Free Regions");
    _free_regions->print(out);
  }

  // The call below uses the SATB* things that live in G1, but probably
  // belong in a shared location.
  JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
                                               SATB_Q_FL_lock,
                                               20 /*G1SATBProcessCompletedThreshold */,
                                               Shared_SATB_Q_lock);

  // Reserve space for prev and next bitmap.
  _bitmap_size = CMBitMap::compute_size(heap_rs.size());
  _heap_region = MemRegion((HeapWord*) heap_rs.base(), heap_rs.size() / HeapWordSize);

  size_t page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();

  ReservedSpace bitmap0(_bitmap_size, page_size);
  os::commit_memory_or_exit(bitmap0.base(), bitmap0.size(), false, "couldn't allocate mark bitmap");
  MemTracker::record_virtual_memory_type(bitmap0.base(), mtGC);
  MemRegion bitmap_region0 = MemRegion((HeapWord*) bitmap0.base(), bitmap0.size() / HeapWordSize);

  ReservedSpace bitmap1(_bitmap_size, page_size);
  os::commit_memory_or_exit(bitmap1.base(), bitmap1.size(), false, "couldn't allocate mark bitmap");
  MemTracker::record_virtual_memory_type(bitmap1.base(), mtGC);
  MemRegion bitmap_region1 = MemRegion((HeapWord*) bitmap1.base(), bitmap1.size() / HeapWordSize);

  if (ShenandoahVerify || (UseShenandoahMatrix && VerifyShenandoahMatrix)) {
    ReservedSpace verify_bitmap(_bitmap_size, page_size);
    os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), false,
                              "couldn't allocate verification bitmap");
    MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
    MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
    _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
  }

  if (ShenandoahAlwaysPreTouch) {
    assert (!AlwaysPreTouch, "Should have been overridden");

    // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
    // before initialize() below zeroes it with the initializing thread. For any given region,
    // we touch the region and the corresponding bitmaps from the same thread.

    log_info(gc, heap)("Parallel pretouch " SIZE_FORMAT " regions with " SIZE_FORMAT " byte pages",
                       _ordered_regions->count(), page_size);
    ShenandoahPretouchTask cl(_ordered_regions, bitmap0.base(), bitmap1.base(), _bitmap_size, page_size);
    _workers->run_task(&cl);
  }

  _mark_bit_map0.initialize(_heap_region, bitmap_region0);
  _complete_mark_bit_map = &_mark_bit_map0;

  _mark_bit_map1.initialize(_heap_region, bitmap_region1);
  _next_mark_bit_map = &_mark_bit_map1;

  _connection_matrix = new ShenandoahConnectionMatrix(_max_regions);
  _partial_gc = new ShenandoahPartialGC(this, _max_regions);

  _monitoring_support = new ShenandoahMonitoringSupport(this);

  _concurrent_gc_thread = new ShenandoahConcurrentThread();

  ShenandoahMarkCompact::initialize();

  return JNI_OK;
}

ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
  CollectedHeap(),
  _shenandoah_policy(policy),
  _concurrent_mark_in_progress(0),
  _evacuation_in_progress(0),
  _full_gc_in_progress(false),
  _free_regions(NULL),
  _collection_set(NULL),
  _bytes_allocated_since_cm(0),
  _bytes_allocated_during_cm(0),
  _max_allocated_gc(0),
  _allocated_last_gc(0),
  _used_start_gc(0),
  _max_workers(MAX2(ConcGCThreads, ParallelGCThreads)),
  _ref_processor(NULL),
  _in_cset_fast_test(NULL),
  _in_cset_fast_test_base(NULL),
  _next_top_at_mark_starts(NULL),
  _next_top_at_mark_starts_base(NULL),
  _complete_top_at_mark_starts(NULL),
  _complete_top_at_mark_starts_base(NULL),
  _mark_bit_map0(),
  _mark_bit_map1(),
  _connection_matrix(NULL),
  _cancelled_concgc(false),
  _need_update_refs(false),
  _need_reset_bitmaps(false),
  _heap_lock(0),
#ifdef ASSERT
  _heap_lock_owner(NULL),
#endif
  _gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer())
{
  log_info(gc, init)("Parallel GC threads: "UINT32_FORMAT, ParallelGCThreads);
  log_info(gc, init)("Concurrent GC threads: "UINT32_FORMAT, ConcGCThreads);
  log_info(gc, init)("Parallel reference processing enabled: %s", BOOL_TO_STR(ParallelRefProcEnabled));

  _scm = new ShenandoahConcurrentMark();
  _used = 0;

  _max_workers = MAX2(_max_workers, 1U);
  _workers = new ShenandoahWorkGang("Shenandoah GC Threads", _max_workers,
                            /* are_GC_task_threads */true,
                            /* are_ConcurrentGC_threads */false);
  if (_workers == NULL) {
    vm_exit_during_initialization("Failed necessary allocation.");
  } else {
    _workers->initialize_workers();
  }
}

class ResetNextBitmapTask : public AbstractGangTask {
private:
  ShenandoahHeapRegionSet* _regions;

public:
  ResetNextBitmapTask(ShenandoahHeapRegionSet* regions) :
    AbstractGangTask("Parallel Reset Bitmap Task"),
    _regions(regions) {
    _regions->clear_current_index();
  }

  void work(uint worker_id) {
    ShenandoahHeapRegion* region = _regions->claim_next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    while (region != NULL) {
      HeapWord* bottom = region->bottom();
      HeapWord* top = heap->next_top_at_mark_start(region->bottom());
      if (top > bottom) {
        heap->next_mark_bit_map()->clear_range_large(MemRegion(bottom, top));
      }
      region = _regions->claim_next();
    }
  }
};

void ShenandoahHeap::reset_next_mark_bitmap(WorkGang* workers) {
  ResetNextBitmapTask task = ResetNextBitmapTask(_ordered_regions);
  workers->run_task(&task);
}

class ResetCompleteBitmapTask : public AbstractGangTask {
private:
  ShenandoahHeapRegionSet* _regions;

public:
  ResetCompleteBitmapTask(ShenandoahHeapRegionSet* regions) :
    AbstractGangTask("Parallel Reset Bitmap Task"),
    _regions(regions) {
    _regions->clear_current_index();
  }

  void work(uint worker_id) {
    ShenandoahHeapRegion* region = _regions->claim_next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    while (region != NULL) {
      HeapWord* bottom = region->bottom();
      HeapWord* top = heap->complete_top_at_mark_start(region->bottom());
      if (top > bottom) {
        heap->complete_mark_bit_map()->clear_range_large(MemRegion(bottom, top));
      }
      region = _regions->claim_next();
    }
  }
};

void ShenandoahHeap::reset_complete_mark_bitmap(WorkGang* workers) {
  ResetCompleteBitmapTask task = ResetCompleteBitmapTask(_ordered_regions);
  workers->run_task(&task);
}

bool ShenandoahHeap::is_next_bitmap_clear() {
  HeapWord* start = _ordered_regions->bottom();
  HeapWord* end = _ordered_regions->end();
  return _next_mark_bit_map->getNextMarkedWordAddress(start, end) == end;
}

bool ShenandoahHeap::is_complete_bitmap_clear_range(HeapWord* start, HeapWord* end) {
  return _complete_mark_bit_map->getNextMarkedWordAddress(start, end) == end;
}

void ShenandoahHeap::print_on(outputStream* st) const {
  st->print("Shenandoah Heap");
  st->print(" total = " SIZE_FORMAT " K, used " SIZE_FORMAT " K ", capacity() / K, used() / K);
  st->print(" [" PTR_FORMAT ", " PTR_FORMAT ") ",
            p2i(reserved_region().start()),
            p2i(reserved_region().end()));
  st->print("Region size = " SIZE_FORMAT "K ", ShenandoahHeapRegion::RegionSizeBytes / K);
  if (_concurrent_mark_in_progress) {
    st->print("marking ");
  }
  if (_evacuation_in_progress) {
    st->print("evacuating ");
  }
  if (cancelled_concgc()) {
    st->print("cancelled ");
  }
  st->print("\n");

  // Adapted from VirtualSpace::print_on(), which is non-PRODUCT only
  st->print("Virtual space:");
  if (_storage.special()) st->print(" (pinned in memory)");
  st->cr();
  st->print_cr(" - committed: " SIZE_FORMAT, _storage.committed_size());
  st->print_cr(" - reserved:  " SIZE_FORMAT, _storage.reserved_size());
  st->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  p2i(_storage.low()), p2i(_storage.high()));
  st->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  p2i(_storage.low_boundary()), p2i(_storage.high_boundary()));

  if (Verbose) {
    print_heap_regions(st);
  }
}

class InitGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    thread->gclab().initialize(true);
  }
};

void ShenandoahHeap::post_initialize() {
  if (UseTLAB) {
    // This is a very tricky point in VM lifetime. We cannot easily call Threads::threads_do
    // here, because some system threads (VMThread, WatcherThread, etc.) are not yet available.
    // Their initialization should be handled separately. If we miss some threads here,
    // then any other TLAB-related activity would fail with asserts.

    InitGCLABClosure init_gclabs;
    {
      MutexLocker ml(Threads_lock);
      for (JavaThread *thread = Threads::first(); thread != NULL; thread = thread->next()) {
        init_gclabs.do_thread(thread);
      }
    }
    gc_threads_do(&init_gclabs);

    // GCLABs cannot be initialized early during VM startup, because a GCLAB cannot
    // determine its max_size yet. Instead, let the WorkGang initialize the gclab
    // when a new worker is created.
    _workers->set_initialize_gclab();
  }

  _scm->initialize(_max_workers);

  ref_processing_init();
}

class CalculateUsedRegionClosure : public ShenandoahHeapRegionClosure {
  size_t sum;
public:
  CalculateUsedRegionClosure() {
    sum = 0;
  }

  bool doHeapRegion(ShenandoahHeapRegion* r) {
    sum = sum + r->used();
    return false;
  }

  size_t getResult() { return sum; }
};

size_t ShenandoahHeap::calculateUsed() {
  CalculateUsedRegionClosure cl;
  heap_region_iterate(&cl);
  return cl.getResult();
}

void ShenandoahHeap::verify_heap_size_consistency() {
  assert(calculateUsed() == used(),
         "heap used size must be consistent: heap-used: "SIZE_FORMAT", regions-used: "SIZE_FORMAT, used(), calculateUsed());
}

size_t ShenandoahHeap::used() const {
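  // Note: _used is only mutated under the heap lock or at a safepoint (see the
  // setters below); the acquire here is presumably meant to keep lock-free
  // readers from seeing a stale value.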
  OrderAccess::acquire();
  return _used;
}

void ShenandoahHeap::increase_used(size_t bytes) {
  assert_heaplock_or_safepoint();
  _used += bytes;
}

void ShenandoahHeap::set_used(size_t bytes) {
  assert_heaplock_or_safepoint();
  _used = bytes;
}

void ShenandoahHeap::decrease_used(size_t bytes) {
  assert_heaplock_or_safepoint();
  assert(_used >= bytes, "never decrease heap size by more than we've left");
  _used -= bytes;
}

size_t ShenandoahHeap::capacity() const {
  return _num_regions * ShenandoahHeapRegion::RegionSizeBytes;
}

bool ShenandoahHeap::is_maximal_no_gc() const {
  Unimplemented();
  return true;
}

size_t ShenandoahHeap::max_capacity() const {
  return _max_regions * ShenandoahHeapRegion::RegionSizeBytes;
}

size_t ShenandoahHeap::min_capacity() const {
  return _initialSize;
}

VirtualSpace* ShenandoahHeap::storage() const {
  return (VirtualSpace*) &_storage;
}

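// Note: this check assumes the committed regions form a single contiguous
// address range starting at the first region.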
bool ShenandoahHeap::is_in(const void* p) const {
  HeapWord* first_region_bottom = _first_region->bottom();
  HeapWord* last_region_end = first_region_bottom + (ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize) * _num_regions;
  return p >= first_region_bottom && p < last_region_end;
}

bool ShenandoahHeap::is_scavengable(const void* p) {
  return true;
}

HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
  // Retain tlab and allocate object in shared space if
  // the amount free in the tlab is too large to discard.
  if (thread->gclab().free() > thread->gclab().refill_waste_limit()) {
    thread->gclab().record_slow_allocation(size);
    return NULL;
  }

  // Discard gclab and allocate a new one.
  // To minimize fragmentation, the last GCLAB may be smaller than the rest.
  size_t new_gclab_size = thread->gclab().compute_size(size);

  thread->gclab().clear_before_allocation();

  if (new_gclab_size == 0) {
    return NULL;
  }

  // Allocate a new GCLAB...
  HeapWord* obj = allocate_new_gclab(new_gclab_size);
  if (obj == NULL) {
    return NULL;
  }

  if (ZeroTLAB) {
    // ..and clear it.
    Copy::zero_to_words(obj, new_gclab_size);
  } else {
    // ...and zap just allocated object.
#ifdef ASSERT
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(obj + hdr_size, new_gclab_size - hdr_size, badHeapWordVal);
#endif // ASSERT
  }
  thread->gclab().fill(obj, obj + size, new_gclab_size);
  return obj;
}

HeapWord* ShenandoahHeap::allocate_new_tlab(size_t word_size) {
  return allocate_new_tlab(word_size, false);
}

HeapWord* ShenandoahHeap::allocate_new_gclab(size_t word_size) {
  return allocate_new_tlab(word_size, true);
}

HeapWord* ShenandoahHeap::allocate_new_tlab(size_t word_size, bool evacuating) {
  HeapWord* result = allocate_memory(word_size, evacuating);

  if (result != NULL) {
    assert(! in_collection_set(result), "Never allocate in dirty region");
    _bytes_allocated_since_cm += word_size * HeapWordSize;

    log_develop_trace(gc, tlab)("allocating new tlab of size "SIZE_FORMAT" at addr "PTR_FORMAT, word_size, p2i(result));
  }
  return result;
}

ShenandoahHeap* ShenandoahHeap::heap() {
  CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to ShenandoahHeap::heap()");
  assert(heap->kind() == CollectedHeap::ShenandoahHeap, "not a shenandoah heap");
  return (ShenandoahHeap*) heap;
}

ShenandoahHeap* ShenandoahHeap::heap_no_check() {
  CollectedHeap* heap = Universe::heap();
  return (ShenandoahHeap*) heap;
}

HeapWord* ShenandoahHeap::allocate_memory_work(size_t word_size) {
  ShenandoahHeapLock heap_lock(this);

  HeapWord* result = allocate_memory_under_lock(word_size);
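  // Round the request up to whole regions, so one grow step can cover even an
  // allocation that spans multiple regions.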
  size_t grow_by = (word_size * HeapWordSize + ShenandoahHeapRegion::RegionSizeBytes - 1) / ShenandoahHeapRegion::RegionSizeBytes;

  while (result == NULL && _num_regions + grow_by <= _max_regions) {
    grow_heap_by(grow_by);
    result = allocate_memory_under_lock(word_size);
  }

  return result;
}

HeapWord* ShenandoahHeap::allocate_memory(size_t word_size, bool evacuating) {
  HeapWord* result = NULL;
  result = allocate_memory_work(word_size);

  if (!evacuating) {
    // If allocation failed, try full-GC, then retry allocation.
    //
    // It might happen that one of the threads requesting allocation would unblock
    // way later after full-GC happened, only to fail the second allocation, because
    // other threads have already depleted the free storage. In this case, a better
    // strategy would be to try full-GC again.
    //
    // Lacking a way to detect progress from the "collect" call, we are left with blindly
    // retrying for some bounded number of times.
    // TODO: Poll if Full GC made enough progress to warrant retry.
    int tries = 0;
    while ((result == NULL) && (tries++ < ShenandoahFullGCTries)) {
      log_debug(gc)("[" PTR_FORMAT "] Failed to allocate " SIZE_FORMAT " bytes, doing full GC, try %d",
                    p2i(Thread::current()), word_size * HeapWordSize, tries);
      collect(GCCause::_allocation_failure);
      result = allocate_memory_work(word_size);
    }
  }

  // Only update monitoring counters when not calling from a write-barrier.
  // Otherwise we might attempt to grab the Service_lock, which we must
  // not do when coming from a write-barrier (because the thread might
  // already hold the Compile_lock).
  if (! evacuating) {
    monitoring_support()->update_counters();
  }

  log_develop_trace(gc, alloc)("allocate memory chunk of size "SIZE_FORMAT" at addr "PTR_FORMAT " by thread %d ",
                               word_size, p2i(result), Thread::current()->osthread()->thread_id());

  return result;
}

bool ShenandoahHeap::call_from_write_barrier(bool evacuating) {
  return evacuating && Thread::current()->is_Java_thread();
}

HeapWord* ShenandoahHeap::allocate_memory_under_lock(size_t word_size) {
  assert_heaplock_owned_by_current_thread();

  if (word_size * HeapWordSize > ShenandoahHeapRegion::RegionSizeBytes) {
    return allocate_large_memory(word_size);
  }

  // Not enough memory in the free region set: coming out of a full GC, it is
  // possible that there is no free region available, so the current index may
  // not be valid. Bail out early if the request cannot fit at all.
  if (word_size * HeapWordSize > _free_regions->capacity()) return NULL;

  ShenandoahHeapRegion* my_current_region = _free_regions->current_no_humongous();

  if (my_current_region == NULL) {
    return NULL; // No more room to make a new region. OOM.
  }
  assert(my_current_region != NULL, "should have a region at this point");

#ifdef ASSERT
  if (in_collection_set(my_current_region)) {
    print_heap_regions();
  }
#endif
  assert(! in_collection_set(my_current_region), "never get targetted regions in free-lists");
  assert(! my_current_region->is_humongous(), "never attempt to allocate from humongous object regions");

  HeapWord* result = my_current_region->allocate(word_size);

  while (result == NULL) {
    // 2nd attempt. Try next region.
    _free_regions->increase_used(my_current_region->free());
    ShenandoahHeapRegion* next_region = _free_regions->next_no_humongous();
    assert(next_region != my_current_region, "must not get current again");
    my_current_region = next_region;

    if (my_current_region == NULL) {
      return NULL; // No more room to make a new region. OOM.
    }
    assert(my_current_region != NULL, "should have a region at this point");
    assert(! in_collection_set(my_current_region), "never get targetted regions in free-lists");
    assert(! my_current_region->is_humongous(), "never attempt to allocate from humongous object regions");
    result = my_current_region->allocate(word_size);
  }

  my_current_region->increase_live_data_words(word_size);
  increase_used(word_size * HeapWordSize);
  _free_regions->increase_used(word_size * HeapWordSize);
  return result;
}

HeapWord* ShenandoahHeap::allocate_large_memory(size_t words) {
  assert_heaplock_owned_by_current_thread();

  uint required_regions = ShenandoahHumongous::required_regions(words * HeapWordSize);
  if (required_regions > _max_regions) return NULL;

  ShenandoahHeapRegion* r = _free_regions->allocate_contiguous(required_regions);

  HeapWord* result = NULL;

  if (r != NULL) {
    result = r->bottom();

    log_debug(gc, humongous)("allocating humongous object of size: "SIZE_FORMAT" KB at location "PTR_FORMAT" in start region "SIZE_FORMAT,
                             (words * HeapWordSize) / K, p2i(result), r->region_number());
  } else {
    log_debug(gc, humongous)("allocating humongous object of size: "SIZE_FORMAT" KB at location "PTR_FORMAT" failed",
                             (words * HeapWordSize) / K, p2i(result));
  }

  return result;
}

HeapWord* ShenandoahHeap::mem_allocate(size_t size,
                                       bool* gc_overhead_limit_was_exceeded) {
#ifdef ASSERT
  if (ShenandoahVerify && _numAllocs > 1000000) {
    _numAllocs = 0;
  }
  _numAllocs++;
#endif
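  // Each object is preceded by a Brooks forwarding pointer word: allocate room
  // for it, initialize it, and return the address just past it as the object.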
  HeapWord* filler = allocate_memory(BrooksPointer::word_size() + size, false);
  if (filler != NULL) {
    HeapWord* result = filler + BrooksPointer::word_size();
    BrooksPointer::initialize(oop(result));
    _bytes_allocated_since_cm += size * HeapWordSize;

    assert(! in_collection_set(result), "never allocate in targetted region");
    return result;
  } else {
    /*
    tty->print_cr("Out of memory. Requested number of words: "SIZE_FORMAT" used heap: "INT64_FORMAT", bytes allocated since last CM: "INT64_FORMAT,
                  size, used(), _bytes_allocated_since_cm);
    {
      print_heap_regions();
      tty->print("Printing "SIZE_FORMAT" free regions:\n", _free_regions->count());
      _free_regions->print();
    }
    */
    return NULL;
  }
}

class ParallelEvacuateRegionObjectClosure : public ObjectClosure {
private:
  ShenandoahHeap* _heap;
  Thread* _thread;
public:
  ParallelEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
    _heap(heap), _thread(Thread::current()) {
  }

  void do_object(oop p) {
    log_develop_trace(gc, compaction)("Calling ParallelEvacuateRegionObjectClosure on "PTR_FORMAT" of size %d",
                                      p2i((HeapWord*) p), p->size());

    assert(_heap->is_marked_complete(p), "expect only marked objects");
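    // An object that still forwards to itself has not been copied yet;
    // evacuate it. Already-forwarded objects need no work here.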
    if (oopDesc::unsafe_equals(p, ShenandoahBarrierSet::resolve_oop_static_not_null(p))) {
      bool evac;
      _heap->evacuate_object(p, _thread, evac);
    }
  }
};

#ifdef ASSERT
class VerifyEvacuatedObjectClosure : public ObjectClosure {
public:
  void do_object(oop p) {
    if (ShenandoahHeap::heap()->is_marked_complete(p)) {
      oop p_prime = oopDesc::bs()->read_barrier(p);
      assert(! oopDesc::unsafe_equals(p, p_prime), "Should point to evacuated copy");
      if (p->klass() != p_prime->klass()) {
        tty->print_cr("copy has different class than original:");
        p->klass()->print_on(tty);
        p_prime->klass()->print_on(tty);
      }
      assert(p->klass() == p_prime->klass(), "Should have the same class p: "PTR_FORMAT", p_prime: "PTR_FORMAT, p2i(p), p2i(p_prime));
      //      assert(p->mark() == p_prime->mark(), "Should have the same mark");
      assert(p->size() == p_prime->size(), "Should be the same size");
      assert(oopDesc::unsafe_equals(p_prime, oopDesc::bs()->read_barrier(p_prime)), "One forward once");
    }
  }
};

void ShenandoahHeap::verify_evacuated_region(ShenandoahHeapRegion* from_region) {
  VerifyEvacuatedObjectClosure verify_evacuation;
  marked_object_iterate(from_region, &verify_evacuation);
}
#endif

void ShenandoahHeap::parallel_evacuate_region(ShenandoahHeapRegion* from_region) {
  assert(from_region->has_live(), "all-garbage regions are reclaimed earlier");

  ParallelEvacuateRegionObjectClosure evacuate_region(this);

  marked_object_iterate(from_region, &evacuate_region);

#ifdef ASSERT
  if (ShenandoahVerify && ! cancelled_concgc()) {
    verify_evacuated_region(from_region);
  }
#endif
}

class ParallelEvacuationTask : public AbstractGangTask {
private:
  ShenandoahHeap* _sh;
  ShenandoahCollectionSet* _cs;

public:
  ParallelEvacuationTask(ShenandoahHeap* sh,
                         ShenandoahCollectionSet* cs) :
    AbstractGangTask("Parallel Evacuation Task"),
    _cs(cs),
    _sh(sh) {}

  void work(uint worker_id) {
    ShenandoahHeapRegion* from_hr = _cs->claim_next();

    while (from_hr != NULL) {
      log_develop_trace(gc, region)("Thread "UINT32_FORMAT" claimed Heap Region "SIZE_FORMAT,
                                    worker_id,
                                    from_hr->region_number());

      assert(from_hr->has_live(), "all-garbage regions are reclaimed early");
      _sh->parallel_evacuate_region(from_hr);

      if (_sh->cancelled_concgc()) {
        log_develop_trace(gc, region)("Cancelled concgc while evacuating region " SIZE_FORMAT, from_hr->region_number());
        break;
      }
      from_hr = _cs->claim_next();
    }
  }
};

class RecycleDirtyRegionsClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* _heap;
  size_t _bytes_reclaimed;
public:
  RecycleDirtyRegionsClosure() : _heap(ShenandoahHeap::heap()), _bytes_reclaimed(0) {}

  bool doHeapRegion(ShenandoahHeapRegion* r) {
    assert (! _heap->cancelled_concgc(), "no recycling after cancelled marking");

    if (_heap->in_collection_set(r)) {
      log_develop_trace(gc, region)("Recycling region " SIZE_FORMAT ":", r->region_number());
      _heap->decrease_used(r->used());
      _bytes_reclaimed += r->used();
      r->recycle();
    }

    return false;
  }
  size_t bytes_reclaimed() { return _bytes_reclaimed; }
  void clear_bytes_reclaimed() { _bytes_reclaimed = 0; }
};

void ShenandoahHeap::recycle_dirty_regions() {
  RecycleDirtyRegionsClosure cl;
  cl.clear_bytes_reclaimed();

  heap_region_iterate(&cl);

  _shenandoah_policy->record_bytes_reclaimed(cl.bytes_reclaimed());
  if (! cancelled_concgc()) {
    clear_cset_fast_test();
  }
}

ShenandoahFreeSet* ShenandoahHeap::free_regions() {
  return _free_regions;
}

void ShenandoahHeap::print_heap_regions(outputStream* st) const {
  _ordered_regions->print(st);
}

class PrintAllRefsOopClosure : public ExtendedOopClosure {
private:
  int _index;
  const char* _prefix;

public:
  PrintAllRefsOopClosure(const char* prefix) : _index(0), _prefix(prefix) {}

private:
  template <class T>
  inline void do_oop_work(T* p) {
    oop o = oopDesc::load_decode_heap_oop(p);
    if (o != NULL) {
      if (ShenandoahHeap::heap()->is_in(o) && o->is_oop()) {
        tty->print_cr("%s "INT32_FORMAT" ("PTR_FORMAT") -> "PTR_FORMAT" (marked: %s) (%s "PTR_FORMAT")",
                      _prefix, _index,
                      p2i(p), p2i(o),
                      BOOL_TO_STR(ShenandoahHeap::heap()->is_marked_complete(o)),
                      o->klass()->internal_name(), p2i(o->klass()));
      } else {
        tty->print_cr("%s "INT32_FORMAT" ("PTR_FORMAT") dirty -> "PTR_FORMAT" (not in heap, possibly corrupted or dirty)",
                      _prefix, _index,
                      p2i(p), p2i(o));
      }
    } else {
      tty->print_cr("%s "INT32_FORMAT" ("PTR_FORMAT") -> "PTR_FORMAT, _prefix, _index, p2i(p), p2i((HeapWord*) o));
    }
    _index++;
  }

public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }

  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

class PrintAllRefsObjectClosure : public ObjectClosure {
  const char* _prefix;

public:
  PrintAllRefsObjectClosure(const char* prefix) : _prefix(prefix) {}

  void do_object(oop p) {
    if (ShenandoahHeap::heap()->is_in(p)) {
      tty->print_cr("%s object "PTR_FORMAT" (marked: %s) (%s "PTR_FORMAT") refers to:",
                    _prefix, p2i(p),
                    BOOL_TO_STR(ShenandoahHeap::heap()->is_marked_complete(p)),
                    p->klass()->internal_name(), p2i(p->klass()));
      PrintAllRefsOopClosure cl(_prefix);
      p->oop_iterate(&cl);
    }
  }
};

void ShenandoahHeap::print_all_refs(const char* prefix) {
  tty->print_cr("printing all references in the heap");
  tty->print_cr("root references:");

  ensure_parsability(false);

  PrintAllRefsOopClosure cl(prefix);
  roots_iterate(&cl);

  tty->print_cr("heap references:");
  PrintAllRefsObjectClosure cl2(prefix);
  object_iterate(&cl2);
}

class VerifyAfterMarkingOopClosure : public ExtendedOopClosure {
private:
  ShenandoahHeap*  _heap;

public:
  VerifyAfterMarkingOopClosure() :
    _heap(ShenandoahHeap::heap()) { }

private:
  template <class T>
  inline void do_oop_work(T* p) {
    oop o = oopDesc::load_decode_heap_oop(p);
    if (o != NULL) {
      if (! _heap->is_marked_complete(o)) {
        _heap->print_heap_regions();
        _heap->print_all_refs("post-mark");
        tty->print_cr("oop not marked, although referrer is marked: "PTR_FORMAT": in_heap: %s, is_marked: %s",
                      p2i((HeapWord*) o), BOOL_TO_STR(_heap->is_in(o)), BOOL_TO_STR(_heap->is_marked_complete(o)));
        _heap->print_heap_locations((HeapWord*) o, (HeapWord*) o + o->size());

        tty->print_cr("oop class: %s", o->klass()->internal_name());
        if (_heap->is_in(p)) {
          oop referrer = oop(_heap->heap_region_containing(p)->block_start_const(p));
          tty->print_cr("Referrer starts at addr "PTR_FORMAT, p2i((HeapWord*) referrer));
          referrer->print();
          _heap->print_heap_locations((HeapWord*) referrer, (HeapWord*) referrer + referrer->size());
        }
        tty->print_cr("heap region containing object:");
        _heap->heap_region_containing(o)->print();
        tty->print_cr("heap region containing referrer:");
        _heap->heap_region_containing(p)->print();
        tty->print_cr("heap region containing forwardee:");
        _heap->heap_region_containing(oopDesc::bs()->read_barrier(o))->print();
      }
      assert(o->is_oop(), "oop must be an oop");
      assert(Metaspace::contains(o->klass()), "klass pointer must go to metaspace");
      if (! oopDesc::unsafe_equals(o, oopDesc::bs()->read_barrier(o))) {
        tty->print_cr("oop has forwardee: p: "PTR_FORMAT" (%s), o = "PTR_FORMAT" (%s), new-o: "PTR_FORMAT" (%s)",
                      p2i(p),
                      BOOL_TO_STR(_heap->in_collection_set(p)),
                      p2i(o),
                      BOOL_TO_STR(_heap->in_collection_set(o)),
                      p2i((HeapWord*) oopDesc::bs()->read_barrier(o)),
                      BOOL_TO_STR(_heap->in_collection_set(oopDesc::bs()->read_barrier(o))));
        tty->print_cr("oop class: %s", o->klass()->internal_name());
      }
      assert(oopDesc::unsafe_equals(o, oopDesc::bs()->read_barrier(o)), "oops must not be forwarded");
      assert(! _heap->in_collection_set(o), "references must not point to dirty heap regions");
      assert(_heap->is_marked_complete(o), "live oops must be marked current");
    }
  }

public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }

  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

void ShenandoahHeap::verify_heap_after_marking() {
  verify_heap_size_consistency();

  log_trace(gc)("verifying heap after marking");

  VerifyAfterMarkingOopClosure cl;
  roots_iterate(&cl);
  ObjectToOopClosure objs(&cl);
  object_iterate(&objs);
}

void ShenandoahHeap::reclaim_humongous_region_at(ShenandoahHeapRegion* r) {
  assert(r->is_humongous_start(), "reclaim regions starting with the first one");

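  // The humongous object starts one Brooks-pointer word past the region bottom.
  // Its total footprint (object plus forwarding word) tells us how many
  // contiguous regions were claimed and must now be recycled.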
  oop humongous_obj = oop(r->bottom() + BrooksPointer::word_size());
  size_t size = humongous_obj->size() + BrooksPointer::word_size();
  uint required_regions = ShenandoahHumongous::required_regions(size * HeapWordSize);
  size_t index = r->region_number();

  assert(!r->has_live(), "liveness must be zero");

  for (size_t i = 0; i < required_regions; i++) {
    ShenandoahHeapRegion* region = _ordered_regions->get(index++);

    assert((region->is_humongous_start() || region->is_humongous_continuation()),
           "expect correct humongous start or continuation");

    if (log_is_enabled(Debug, gc, humongous)) {
      log_debug(gc, humongous)("reclaiming "UINT32_FORMAT" humongous regions for object of size: "SIZE_FORMAT" words", required_regions, size);
      ResourceMark rm;
      outputStream* out = Log(gc, humongous)::debug_stream();
      region->print_on(out);
    }

    region->recycle();
    ShenandoahHeap::heap()->decrease_used(ShenandoahHeapRegion::RegionSizeBytes);
  }
}

class ShenandoahReclaimHumongousRegionsClosure : public ShenandoahHeapRegionClosure {

  bool doHeapRegion(ShenandoahHeapRegion* r) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();

    if (r->is_humongous_start()) {
      oop humongous_obj = oop(r->bottom() + BrooksPointer::word_size());
      if (! heap->is_marked_complete(humongous_obj)) {
        heap->reclaim_humongous_region_at(r);
      }
    }
    return false;
  }
};

#ifdef ASSERT
class CheckCollectionSetClosure : public ShenandoahHeapRegionClosure {
  bool doHeapRegion(ShenandoahHeapRegion* r) {
    assert(! ShenandoahHeap::heap()->in_collection_set(r), "Should have been cleared by now");
    return false;
  }
};
#endif

void ShenandoahHeap::prepare_for_concurrent_evacuation() {
  assert(_ordered_regions->get(0)->region_number() == 0, "FIXME CHF. FIXME CHF!");

  log_develop_trace(gc)("Thread %d started prepare_for_concurrent_evacuation", Thread::current()->osthread()->thread_id());

  if (!cancelled_concgc()) {
    recycle_dirty_regions();

    ensure_parsability(true);

    if (UseShenandoahMatrix && PrintShenandoahMatrix) {
      outputStream* log = Log(gc)::info_stream();
      connection_matrix()->print_on(log);
    }

    if (ShenandoahVerify || (UseShenandoahMatrix && VerifyShenandoahMatrix)) {
      verify_heap_reachable_at_safepoint();
    }

#ifdef ASSERT
    if (ShenandoahVerify) {
      verify_heap_after_marking();
    }
#endif

    // NOTE: This needs to be done during a stop the world pause, because
    // putting regions into the collection set concurrently with Java threads
    // will create a race. In particular, acmp could fail because when we
    // resolve the first operand, the containing region might not yet be in
    // the collection set, and thus return the original oop. When the 2nd
    // operand gets resolved, the region could be in the collection set
    // and the oop gets evacuated. If both operands have originally been
    // the same, we get false negatives.

    {
      ShenandoahHeapLock lock(this);
      _collection_set->clear();
      _free_regions->clear();

      ShenandoahReclaimHumongousRegionsClosure reclaim;
      heap_region_iterate(&reclaim);

#ifdef ASSERT
      CheckCollectionSetClosure ccsc;
      _ordered_regions->heap_region_iterate(&ccsc);
#endif

      _shenandoah_policy->choose_collection_set(_collection_set);

      _shenandoah_policy->choose_free_set(_free_regions);
    }

    _bytes_allocated_since_cm = 0;

    Universe::update_heap_info_at_gc();
  }
}

class RetireTLABClosure : public ThreadClosure {
private:
  bool _retire;

public:
  RetireTLABClosure(bool retire) : _retire(retire) {
  }

  void do_thread(Thread* thread) {
    thread->gclab().make_parsable(_retire);
  }
};

void ShenandoahHeap::ensure_parsability(bool retire_tlabs) {
  if (UseTLAB) {
    CollectedHeap::ensure_parsability(retire_tlabs);
    RetireTLABClosure cl(retire_tlabs);
    Threads::threads_do(&cl);
  }
}

class ShenandoahEvacuateUpdateRootsClosure : public ExtendedOopClosure {
private:
  ShenandoahHeap* _heap;
  Thread* _thread;
public:
  ShenandoahEvacuateUpdateRootsClosure() :
    _heap(ShenandoahHeap::heap()), _thread(Thread::current()) {
  }

private:
  template <class T>
  void do_oop_work(T* p) {
    assert(_heap->is_evacuation_in_progress(), "Only do this when evacuation is in progress");

    T o = oopDesc::load_heap_oop(p);
    if (! oopDesc::is_null(o)) {
      oop obj = oopDesc::decode_heap_oop_not_null(o);
      if (_heap->in_collection_set(obj)) {
        assert(_heap->is_marked_complete(obj), "only evacuate marked objects %d %d",
               _heap->is_marked_complete(obj), _heap->is_marked_complete(ShenandoahBarrierSet::resolve_oop_static_not_null(obj)));
        oop resolved = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
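        // Evacuate only if the object has not been copied yet; if another
        // thread won the race, resolve already returned its to-space copy.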
        if (oopDesc::unsafe_equals(resolved, obj)) {
          bool evac;
          resolved = _heap->evacuate_object(obj, _thread, evac);
        }
        oopDesc::encode_store_heap_oop(p, resolved);
      }
    }
#ifdef ASSERT
    else {
      // tty->print_cr("not updating root at: "PTR_FORMAT" with object: "PTR_FORMAT", is_in_heap: %s, is_in_cset: %s, is_marked: %s",
      //               p2i(p),
      //               p2i((HeapWord*) obj),
      //               BOOL_TO_STR(_heap->is_in(obj)),
      //               BOOL_TO_STR(_heap->in_cset_fast_test(obj)),
      //               BOOL_TO_STR(_heap->is_marked_complete(obj)));
    }
#endif
  }

public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }
  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

class ShenandoahEvacuateUpdateRootsTask : public AbstractGangTask {
  ShenandoahRootEvacuator* _rp;
public:

  ShenandoahEvacuateUpdateRootsTask(ShenandoahRootEvacuator* rp) :
    AbstractGangTask("Shenandoah evacuate and update roots"),
    _rp(rp)
  {
    // Nothing else to do.
  }

  void work(uint worker_id) {
    ShenandoahEvacuateUpdateRootsClosure cl;
    MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);

    _rp->process_evacuate_roots(&cl, &blobsCl, worker_id);
  }
};

class ShenandoahFixRootsTask : public AbstractGangTask {
  ShenandoahRootEvacuator* _rp;
public:

  ShenandoahFixRootsTask(ShenandoahRootEvacuator* rp) :
    AbstractGangTask("Shenandoah update roots"),
    _rp(rp)
  {
    // Nothing else to do.
  }

  void work(uint worker_id) {
    SCMUpdateRefsClosure cl;
    MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);

    _rp->process_evacuate_roots(&cl, &blobsCl, worker_id);
  }
};

void ShenandoahHeap::evacuate_and_update_roots() {
  COMPILER2_PRESENT(DerivedPointerTable::clear());

#ifdef ASSERT
  if (ShenandoahVerifyReadsToFromSpace) {
    set_from_region_protection(false);
  }
#endif

  assert(SafepointSynchronize::is_at_safepoint(), "Only iterate roots while world is stopped");
  ClassLoaderDataGraph::clear_claimed_marks();

  {
    ShenandoahRootEvacuator rp(this, workers()->active_workers(), ShenandoahCollectorPolicy::evac_thread_roots);
    ShenandoahEvacuateUpdateRootsTask roots_task(&rp);
    workers()->run_task(&roots_task);
  }

  COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

  if (cancelled_concgc()) {
    // If initial evacuation has been cancelled, we need to update all references
    // after all workers have finished. Otherwise we might run into the following problem:
    // GC thread 1 cannot allocate anymore, thus evacuation fails and leaves a from-space
    // ptr of object X. GC thread 2 evacuates the same object X to to-space, which leaves
    // a truly dangling from-space reference in the first root oop*. This must not happen.
    // clear() and update_pointers() must always be called in pairs,
    // and cannot nest with the clear()/update_pointers() above.
    COMPILER2_PRESENT(DerivedPointerTable::clear());
    ShenandoahRootEvacuator rp(this, workers()->active_workers(), ShenandoahCollectorPolicy::evac_thread_roots);
    ShenandoahFixRootsTask update_roots_task(&rp);
    workers()->run_task(&update_roots_task);
    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
  }

#ifdef ASSERT
  if (ShenandoahVerifyReadsToFromSpace) {
    set_from_region_protection(true);
  }
#endif

#ifdef ASSERT
  {
    AssertToSpaceClosure cl;
    CodeBlobToOopClosure code_cl(&cl, !CodeBlobToOopClosure::FixRelocations);
    ShenandoahRootEvacuator rp(this, 1);
    rp.process_evacuate_roots(&cl, &code_cl, 0);
  }
#endif
}

void ShenandoahHeap::do_evacuation() {
  parallel_evacuate();

  if (ShenandoahVerify && ! cancelled_concgc()) {
    VM_ShenandoahVerifyHeapAfterEvacuation verify_after_evacuation;
    if (Thread::current()->is_VM_thread()) {
      verify_after_evacuation.doit();
    } else {
      VMThread::execute(&verify_after_evacuation);
    }
  }
}

void ShenandoahHeap::parallel_evacuate() {
  log_develop_trace(gc)("starting parallel_evacuate");

  _shenandoah_policy->record_phase_start(ShenandoahCollectorPolicy::conc_evac);

  if (log_is_enabled(Trace, gc, region)) {
    ResourceMark rm;
    outputStream *out = Log(gc, region)::trace_stream();
    out->print("Printing all available regions");
    print_heap_regions(out);
  }

  if (log_is_enabled(Trace, gc, cset)) {
    ResourceMark rm;
    outputStream *out = Log(gc, cset)::trace_stream();
    out->print("Printing collection set which contains "SIZE_FORMAT" regions:\n", _collection_set->count());
    _collection_set->print(out);

    out->print("Printing free set which contains "SIZE_FORMAT" regions:\n", _free_regions->count());
    _free_regions->print(out);
  }

  ParallelEvacuationTask evacuationTask = ParallelEvacuationTask(this, _collection_set);

  workers()->run_task(&evacuationTask);

  if (log_is_enabled(Trace, gc, cset)) {
    ResourceMark rm;
    outputStream *out = Log(gc, cset)::trace_stream();
    out->print("Printing postgc collection set which contains "SIZE_FORMAT" regions:\n",
               _collection_set->count());
    _collection_set->print(out);

    out->print("Printing postgc free regions which contain "SIZE_FORMAT" free regions:\n",
               _free_regions->count());
    _free_regions->print(out);
  }

  if (log_is_enabled(Trace, gc, region)) {
    ResourceMark rm;
    outputStream *out = Log(gc, region)::trace_stream();
    out->print_cr("all regions after evacuation:");
    print_heap_regions(out);
  }

  _shenandoah_policy->record_phase_end(ShenandoahCollectorPolicy::conc_evac);
}

class VerifyEvacuationClosure: public ExtendedOopClosure {
private:
  ShenandoahHeap*  _heap;
  ShenandoahHeapRegion* _from_region;

public:
  VerifyEvacuationClosure(ShenandoahHeapRegion* from_region) :
    _heap(ShenandoahHeap::heap()), _from_region(from_region) { }
private:
  template <class T>
  inline void do_oop_work(T* p) {
    oop heap_oop = oopDesc::load_decode_heap_oop(p);
    if (! oopDesc::is_null(heap_oop)) {
      guarantee(! _from_region->is_in(heap_oop), "no references to from-region allowed after evacuation: "PTR_FORMAT, p2i((HeapWord*) heap_oop));
    }
  }

public:
  void do_oop(oop* p)       {
    do_oop_work(p);
  }

  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

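// Apply the given closure to all GC roots. Only valid at a safepoint, when
// the world is stopped.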
void ShenandoahHeap::roots_iterate(OopClosure* cl) {
  assert(SafepointSynchronize::is_at_safepoint(), "Only iterate roots while world is stopped");

  CodeBlobToOopClosure blobsCl(cl, !CodeBlobToOopClosure::FixRelocations);
  CLDToOopClosure cldCl(cl);

  ClassLoaderDataGraph::clear_claimed_marks();

  ShenandoahRootProcessor rp(this, 1);
  rp.process_all_roots(cl, NULL, &cldCl, &blobsCl, 0);
}

void ShenandoahHeap::verify_evacuation(ShenandoahHeapRegion* from_region) {
  VerifyEvacuationClosure rootsCl(from_region);
  roots_iterate(&rootsCl);
}

bool ShenandoahHeap::supports_tlab_allocation() const {
  return true;
}

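// Conservative estimate of how much a new TLAB may allocate without taking
// locks: either the remainder of the current allocation region, or a whole
// fresh region if that remainder is too small.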
size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
  size_t idx = _free_regions->current_index();
  ShenandoahHeapRegion* current = _free_regions->get(idx);
  if (current == NULL) {
    return 0;
  } else if (current->free() > MinTLABSize) {
    // Current region has enough space left, can use it.
    return current->free();
  } else {
    // No more space in current region, we will take next free region
    // on the next TLAB allocation.
    return ShenandoahHeapRegion::RegionSizeBytes;
  }
}

size_t ShenandoahHeap::max_tlab_size() const {
  return ShenandoahHeapRegion::RegionSizeBytes;
}

class ResizeGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    thread->gclab().resize();
  }
};

void ShenandoahHeap::resize_all_tlabs() {
  CollectedHeap::resize_all_tlabs();

  ResizeGCLABClosure cl;
  Threads::threads_do(&cl);
}

class AccumulateStatisticsGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    thread->gclab().accumulate_statistics();
    thread->gclab().initialize_statistics();
  }
};

void ShenandoahHeap::accumulate_statistics_all_gclabs() {
  AccumulateStatisticsGCLABClosure cl;
  Threads::threads_do(&cl);
}

bool ShenandoahHeap::can_elide_tlab_store_barriers() const {
  return true;
}

oop ShenandoahHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) {
  // Overridden to do nothing.
  return new_obj;
}

bool ShenandoahHeap::can_elide_initializing_store_barrier(oop new_obj) {
  return true;
}

bool ShenandoahHeap::card_mark_must_follow_store() const {
  return false;
}

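// Entry point for external collection requests. Explicit (user-requested)
// GCs are honored unless -XX:+DisableExplicitGC is set; allocation failures
// additionally clear all soft references. Both cases are handed off to the
// concurrent GC thread as full GCs.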
void ShenandoahHeap::collect(GCCause::Cause cause) {
  assert(cause != GCCause::_gc_locker, "no JNI critical callback");
  if (GCCause::is_user_requested_gc(cause)) {
    if (! DisableExplicitGC) {
      _concurrent_gc_thread->do_full_gc(cause);
    }
  } else if (cause == GCCause::_allocation_failure) {
    collector_policy()->set_should_clear_all_soft_refs(true);
    _concurrent_gc_thread->do_full_gc(cause);
  }
}

void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {
  // Nothing to do here: full collections are scheduled through the
  // concurrent GC thread (see collect() above).
}

AdaptiveSizePolicy* ShenandoahHeap::size_policy() {
  Unimplemented();
  return NULL;
}

CollectorPolicy* ShenandoahHeap::collector_policy() const {
  return _shenandoah_policy;
}

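// Block API: heap regions are Spaces, so these delegate to the region
// containing the given address.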
HeapWord* ShenandoahHeap::block_start(const void* addr) const {
  Space* sp = heap_region_containing(addr);
  if (sp != NULL) {
    return sp->block_start(addr);
  }
  return NULL;
}

size_t ShenandoahHeap::block_size(const HeapWord* addr) const {
  Space* sp = heap_region_containing(addr);
  assert(sp != NULL, "block_size of address outside of heap");
  return sp->block_size(addr);
}

bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
  Space* sp = heap_region_containing(addr);
  return sp->block_is_obj(addr);
}

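// Not tracked yet: conservatively report 0, i.e. behave as if a GC had just
// happened.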
jlong ShenandoahHeap::millis_since_last_gc() {
  return 0;
}

void ShenandoahHeap::prepare_for_verify() {
  if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
    ensure_parsability(false);
  }
}

void ShenandoahHeap::print_gc_threads_on(outputStream* st) const {
  workers()->print_worker_threads_on(st);
}

void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
  workers()->threads_do(tcl);
}

void ShenandoahHeap::print_tracing_info() const {
  if (log_is_enabled(Info, gc, stats)) {
    ResourceMark rm;
    outputStream* out = Log(gc, stats)::info_stream();
    _shenandoah_policy->print_tracing_info(out);
  }
}

class ShenandoahVerifyRootsClosure: public ExtendedOopClosure {
private:
  ShenandoahHeap*  _heap;
  VerifyOption     _vo;
  bool             _failures;
public:
  // _vo == UsePrevMarking -> use "prev" marking information,
  // _vo == UseNextMarking -> use "next" marking information,
  // _vo == UseMarkWord    -> use mark word from object header.
  ShenandoahVerifyRootsClosure(VerifyOption vo) :
    _heap(ShenandoahHeap::heap()),
    _vo(vo),
    _failures(false) { }

  bool failures() { return _failures; }

private:
  template <class T>
  inline void do_oop_work(T* p) {
    oop obj = oopDesc::load_decode_heap_oop(p);
    if (! oopDesc::is_null(obj) && ! obj->is_oop()) {
      // Record the failure and print some context; the guarantee below will
      // then abort the VM.
      _failures = true;
      tty->print_cr("Invalid oop "PTR_FORMAT" found at root location "PTR_FORMAT,
                    p2i((void*) obj), p2i(p));
    }
    guarantee(obj->is_oop_or_null(), "is oop or null");
  }

public:
  void do_oop(oop* p)       {
    do_oop_work(p);
  }

  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

class ShenandoahVerifyHeapClosure: public ObjectClosure {
private:
  ShenandoahVerifyRootsClosure _rootsCl;
public:
  ShenandoahVerifyHeapClosure(ShenandoahVerifyRootsClosure rc) :
    _rootsCl(rc) {}

  void do_object(oop p) {
    _rootsCl.do_oop(&p);
  }
};

class ShenandoahVerifyKlassClosure: public KlassClosure {
  OopClosure *_oop_closure;
 public:
  ShenandoahVerifyKlassClosure(OopClosure* cl) : _oop_closure(cl) {}
  void do_klass(Klass* k) {
    k->oops_do(_oop_closure);
  }
};

void ShenandoahHeap::verify(VerifyOption vo) {
  if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
    ShenandoahVerifyRootsClosure rootsCl(vo);

    assert(Thread::current()->is_VM_thread(),
           "Expected to be executed serially by the VM thread at this point");

    roots_iterate(&rootsCl);

    bool failures = rootsCl.failures();
    log_trace(gc)("verify failures: %s", BOOL_TO_STR(failures));

    ShenandoahVerifyHeapClosure heapCl(rootsCl);

    object_iterate(&heapCl);
    // TODO: Implement rest of it.
  } else {
    tty->print("(SKIPPING roots, heapRegions, remset) ");
  }
}

size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
  return _free_regions->capacity();
}

class ShenandoahIterateObjectClosureRegionClosure: public ShenandoahHeapRegionClosure {
  ObjectClosure* _cl;
public:
  ShenandoahIterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {}
  bool doHeapRegion(ShenandoahHeapRegion* r) {
    ShenandoahHeap::heap()->marked_object_iterate(r, _cl);
    return false;
  }
};

void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
  ShenandoahIterateObjectClosureRegionClosure blk(cl);
  heap_region_iterate(&blk,
                      /* skip_dirty_regions = */ false,
                      /* skip_humongous_continuations = */ true);
}

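// Closures for safe_object_iterate() below. Before an object is handed to
// the user closure, all its references are resolved through the Brooks
// forwarding pointer, so the closure only ever sees consistent objects.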
class ShenandoahSafeObjectIterateAdjustPtrsClosure : public MetadataAwareOopClosure {
private:
  ShenandoahHeap* _heap;

public:
  ShenandoahSafeObjectIterateAdjustPtrsClosure() : _heap(ShenandoahHeap::heap()) {}

private:
  template <class T>
  inline void do_oop_work(T* p) {
    T o = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(o)) {
      oop obj = oopDesc::decode_heap_oop_not_null(o);
      oopDesc::encode_store_heap_oop(p, BrooksPointer::forwardee(obj));
    }
  }
public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }
  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

class ShenandoahSafeObjectIterateAndUpdate : public ObjectClosure {
private:
  ObjectClosure* _cl;
public:
  ShenandoahSafeObjectIterateAndUpdate(ObjectClosure *cl) : _cl(cl) {}

  virtual void do_object(oop obj) {
    assert (oopDesc::unsafe_equals(obj, BrooksPointer::forwardee(obj)),
            "avoid double-counting: only non-forwarded objects here");

    // Fix up the pointers in the object.
    ShenandoahSafeObjectIterateAdjustPtrsClosure adjust_ptrs;
    obj->oop_iterate(&adjust_ptrs);

    // Now hand the consistent object to the user-supplied closure:
    _cl->do_object(obj);
  }
};

void ShenandoahHeap::safe_object_iterate(ObjectClosure* cl) {
  assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");

  // Safe iteration does objects only with correct references.
  // This is why we skip dirty regions that have stale copies of objects,
  // and fix up the pointers in the returned objects.

  ShenandoahSafeObjectIterateAndUpdate safe_cl(cl);
  ShenandoahIterateObjectClosureRegionClosure blk(&safe_cl);
  heap_region_iterate(&blk,
                      /* skip_dirty_regions = */ true,
                      /* skip_humongous_continuations = */ true);

  _need_update_refs = false; // already updated the references
}

// Apply blk->doHeapRegion() on all committed regions in address order,
// terminating the iteration early if doHeapRegion() returns true.
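// Typical usage (sketch): count the regions that survive the filters.
//
//   class CountRegionsClosure : public ShenandoahHeapRegionClosure {
//   public:
//     size_t _count;
//     CountRegionsClosure() : _count(0) {}
//     bool doHeapRegion(ShenandoahHeapRegion* r) {
//       _count++;
//       return false; // false == keep iterating
//     }
//   };
//
//   CountRegionsClosure closure;
//   heap_region_iterate(&closure);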
void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk, bool skip_dirty_regions, bool skip_humongous_continuation) const {
  for (size_t i = 0; i < _num_regions; i++) {
    ShenandoahHeapRegion* current = _ordered_regions->get(i);
    if (skip_humongous_continuation && current->is_humongous_continuation()) {
      continue;
    }
    if (skip_dirty_regions && in_collection_set(current)) {
      continue;
    }
    if (blk->doHeapRegion(current)) {
      return;
    }
  }
}

class ClearLivenessClosure : public ShenandoahHeapRegionClosure {
  ShenandoahHeap* sh;
public:
  ClearLivenessClosure(ShenandoahHeap* heap) : sh(heap) { }

  bool doHeapRegion(ShenandoahHeapRegion* r) {
    r->clear_live_data();
    sh->set_next_top_at_mark_start(r->bottom(), r->top());
    return false;
  }
};

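// Prologue of a marking cycle: record allocation statistics, retire TLABs so
// the heap is parsable, clear per-region liveness, set each region's next
// top-at-mark-start, and finally scan the root set.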
void ShenandoahHeap::start_concurrent_marking() {
  shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::accumulate_stats);
  accumulate_statistics_all_tlabs();
  shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::accumulate_stats);

  set_concurrent_mark_in_progress(true);
  // We need to reset all TLABs because we'd lose marks on all objects allocated in them.
  if (UseTLAB) {
    shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::make_parsable);
    ensure_parsability(true);
    shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::make_parsable);
  }

  _shenandoah_policy->record_bytes_allocated(_bytes_allocated_since_cm);
  _used_start_gc = used();

#ifdef ASSERT
  if (ShenandoahDumpHeapBeforeConcurrentMark) {
    ensure_parsability(false);
    print_all_refs("pre-mark");
  }
#endif

  shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::clear_liveness);
  ClearLivenessClosure clc(this);
  heap_region_iterate(&clc);
  shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::clear_liveness);

  if (UseShenandoahMatrix) {
    connection_matrix()->clear_all();
  }
  // Make above changes visible to worker threads
  OrderAccess::fence();

  shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::scan_roots);
  concurrentMark()->init_mark_roots();
  shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::scan_roots);
}

class VerifyAfterEvacuationClosure : public ExtendedOopClosure {
  ShenandoahHeap* _sh;

public:
  VerifyAfterEvacuationClosure() : _sh(ShenandoahHeap::heap()) {}

  template<class T> void do_oop_nv(T* p) {
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
      guarantee(_sh->in_collection_set(obj) == (! oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj))),
                "forwarded objects can only exist in dirty (from-space) regions. is_dirty: %s, is_forwarded: %s, obj-klass: %s, marked: %s",
                BOOL_TO_STR(_sh->in_collection_set(obj)),
                BOOL_TO_STR(! oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj))),
                obj->klass()->external_name(),
                BOOL_TO_STR(_sh->is_marked_complete(obj)));
      obj = oopDesc::bs()->read_barrier(obj);
      guarantee(! _sh->in_collection_set(obj), "forwarded oops must not point to dirty regions");
      guarantee(obj->is_oop(), "is_oop");
      guarantee(Metaspace::contains(obj->klass()), "klass pointer must go to metaspace");
    }
  }

  void do_oop(oop* p)       { do_oop_nv(p); }
  void do_oop(narrowOop* p) { do_oop_nv(p); }
};

void ShenandoahHeap::verify_heap_after_evacuation() {
  verify_heap_size_consistency();

  ensure_parsability(false);

  VerifyAfterEvacuationClosure cl;
  roots_iterate(&cl);

  ObjectToOopClosure objs(&cl);
  object_iterate(&objs);
}

class VerifyRegionsAfterUpdateRefsClosure : public ShenandoahHeapRegionClosure {
public:
  bool doHeapRegion(ShenandoahHeapRegion* r) {
    assert(! ShenandoahHeap::heap()->in_collection_set(r), "no region must be in collection set");
    return false;
  }
};

void ShenandoahHeap::swap_mark_bitmaps() {
  // Swap bitmaps.
  CMBitMap* tmp1 = _complete_mark_bit_map;
  _complete_mark_bit_map = _next_mark_bit_map;
  _next_mark_bit_map = tmp1;

  // Swap top-at-mark-start pointers
  HeapWord** tmp2 = _complete_top_at_mark_starts;
  _complete_top_at_mark_starts = _next_top_at_mark_starts;
  _next_top_at_mark_starts = tmp2;

  HeapWord** tmp3 = _complete_top_at_mark_starts_base;
  _complete_top_at_mark_starts_base = _next_top_at_mark_starts_base;
  _next_top_at_mark_starts_base = tmp3;
}

class VerifyReachableHeapClosure : public ExtendedOopClosure {
private:
  SCMObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
  CMBitMap* _map;
  bool _check_matrix;
  oop _obj;
public:
  VerifyReachableHeapClosure(SCMObjToScanQueue* queue, CMBitMap* map, bool check_matrix) :
          _queue(queue), _heap(ShenandoahHeap::heap()), _map(map), _check_matrix(check_matrix), _obj(NULL) {}
  template <class T>
  void do_oop_work(T* p) {
    T o = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(o)) {
      oop obj = oopDesc::decode_heap_oop_not_null(o);
      guarantee(check_obj_alignment(obj), "sanity");

      guarantee(!oopDesc::is_null(obj), "sanity");
      guarantee(_heap->is_in(obj), "sanity");

      oop forw = BrooksPointer::forwardee(obj);
      guarantee(!oopDesc::is_null(forw), "sanity");
      guarantee(_heap->is_in(forw), "sanity");

      guarantee(oopDesc::unsafe_equals(obj, forw), "should not be forwarded");

      if (_check_matrix) {
        size_t from_idx = _heap->heap_region_index_containing(p);
        size_t to_idx = _heap->heap_region_index_containing(obj);
        if (!_heap->connection_matrix()->is_connected(from_idx, to_idx)) {
          tty->print_cr("from-obj: ");
          _obj->print_on(tty);
          tty->print_cr("to-obj:");
          obj->print_on(tty);
          tty->print_cr("from-obj allocated after mark: %s", BOOL_TO_STR(_heap->allocated_after_complete_mark_start((HeapWord*) _obj)));
          tty->print_cr("to-obj allocated after mark: %s", BOOL_TO_STR(_heap->allocated_after_complete_mark_start((HeapWord*) obj)));
          tty->print_cr("from-obj marked: %s", BOOL_TO_STR(_heap->is_marked_complete(_obj)));
          tty->print_cr("to-obj marked: %s", BOOL_TO_STR(_heap->is_marked_complete(obj)));
          tty->print_cr("from-idx: " SIZE_FORMAT ", to-idx: " SIZE_FORMAT, from_idx, to_idx);

          oop fwd_from = BrooksPointer::forwardee(_obj);
          oop fwd_to = BrooksPointer::forwardee(obj);
          tty->print_cr("from-obj forwardee: " PTR_FORMAT, p2i(fwd_from));
          tty->print_cr("to-obj forwardee: " PTR_FORMAT, p2i(fwd_to));
          tty->print_cr("forward(from-obj) marked: %s", BOOL_TO_STR(_heap->is_marked_complete(fwd_from)));
          tty->print_cr("forward(to-obj) marked: %s", BOOL_TO_STR(_heap->is_marked_complete(fwd_to)));
          size_t fwd_from_idx = _heap->heap_region_index_containing(fwd_from);
          size_t fwd_to_idx = _heap->heap_region_index_containing(fwd_to);
          tty->print_cr("forward(from-idx): " SIZE_FORMAT ", forward(to-idx): " SIZE_FORMAT, fwd_from_idx, fwd_to_idx);
          tty->print_cr("forward(from) connected with forward(to)? %s", BOOL_TO_STR(_heap->connection_matrix()->is_connected(fwd_from_idx, fwd_to_idx)));
        }
        guarantee(oopDesc::unsafe_equals(ShenandoahBarrierSet::resolve_oop_static_not_null(obj), obj), "must not be forwarded");
        guarantee(_heap->connection_matrix()->is_connected(from_idx, to_idx), "must be connected");
      }

      if (_map->parMark((HeapWord*) obj)) {
        _queue->push(SCMTask(obj));
      }
    }
  }

  void do_oop(oop* p) { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
  void set_obj(oop o) { _obj = o; }
};

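// Verify reachability of the heap at a safepoint: starting from the roots,
// walk the whole object graph with a single queue and a temporary mark
// bitmap, checking every visited reference (and, if requested, the
// connection matrix) along the way.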
void ShenandoahHeap::verify_heap_reachable_at_safepoint() {
  guarantee(SafepointSynchronize::is_at_safepoint(), "only when nothing else happens");
  guarantee(ShenandoahVerify || (UseShenandoahMatrix && VerifyShenandoahMatrix),
            "only when these are enabled, and bitmap is initialized in ShenandoahHeap::initialize");

  OrderAccess::fence();
  ensure_parsability(false);

  // Allocate temporary bitmap for storing marking wavefront:
  MemRegion mr = MemRegion(_verification_bit_map.startWord(), _verification_bit_map.endWord());
  _verification_bit_map.clear_range_large(mr);

  // Initialize a single queue
  SCMObjToScanQueue* q = new SCMObjToScanQueue();
  q->initialize();

  // Scan root set
  ClassLoaderDataGraph::clear_claimed_marks();
  ShenandoahRootProcessor rp(this, 1);

  {
    VerifyReachableHeapClosure cl(q, &_verification_bit_map, false);
    CLDToOopClosure cld_cl(&cl);
    CodeBlobToOopClosure code_cl(&cl, ! CodeBlobToOopClosure::FixRelocations);
    rp.process_all_roots(&cl, &cl, &cld_cl, &code_cl, 0);
  }

  // Finish the scan
  {
    VerifyReachableHeapClosure cl(q, &_verification_bit_map, UseShenandoahMatrix && VerifyShenandoahMatrix);
    SCMTask task;
    while ((q->pop_buffer(task) ||
            q->pop_local(task) ||
            q->pop_overflow(task))) {
      oop obj = task.obj();
      assert(!oopDesc::is_null(obj), "must not be null");
      cl.set_obj(obj);
      obj->oop_iterate(&cl);
    }
  }

  // Clean up.
  delete q;
}

void ShenandoahHeap::stop_concurrent_marking() {
  assert(concurrent_mark_in_progress(), "How else could we get here?");
  if (! cancelled_concgc()) {
    // Marking completed without cancellation: the 'next' bitmap now holds a
    // complete marking, so swap the bitmaps and clear the update-refs flag.
    // On cancellation the flag is left alone, because references may still
    // need to be fixed up later.
    set_need_update_refs(false);
    swap_mark_bitmaps();
  }
  set_concurrent_mark_in_progress(false);

  if (log_is_enabled(Trace, gc, region)) {
    ResourceMark rm;
    outputStream* out = Log(gc, region)::trace_stream();
    print_heap_regions(out);
  }
}

void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
  _concurrent_mark_in_progress = in_progress ? 1 : 0;
  JavaThread::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
}

void ShenandoahHeap::set_evacuation_in_progress_concurrently(bool in_progress) {
  // Note: it is important to first release the _evacuation_in_progress flag here,
  // so that Java threads can get out of oom_during_evacuation() and reach a safepoint,
  // in case a VM task is pending.
  set_evacuation_in_progress(in_progress);
  MutexLocker mu(Threads_lock);
  JavaThread::set_evacuation_in_progress_all_threads(in_progress);
}

void ShenandoahHeap::set_evacuation_in_progress_at_safepoint(bool in_progress) {
  assert(SafepointSynchronize::is_at_safepoint(), "Only call this at safepoint");
  set_evacuation_in_progress(in_progress);
  JavaThread::set_evacuation_in_progress_all_threads(in_progress);
}

void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
  _evacuation_in_progress = in_progress ? 1 : 0;
  OrderAccess::fence();
}

void ShenandoahHeap::verify_copy(oop p, oop c) {
  assert(! oopDesc::unsafe_equals(p, oopDesc::bs()->read_barrier(p)), "forwarded correctly");
  assert(oopDesc::unsafe_equals(oopDesc::bs()->read_barrier(p), c), "verify pointer is correct");
  if (p->klass() != c->klass()) {
    print_heap_regions();
  }
  assert(p->klass() == c->klass(), "verify class p-size: "INT32_FORMAT" c-size: "INT32_FORMAT, p->size(), c->size());
  assert(p->size() == c->size(), "verify size");
  // Object may have been locked between copy and verification
  //    assert(p->mark() == c->mark(), "verify mark");
  assert(oopDesc::unsafe_equals(c, oopDesc::bs()->read_barrier(c)), "verify only forwarded once");
}

void ShenandoahHeap::oom_during_evacuation() {
  log_develop_trace(gc)("Out of memory during evacuation, cancel evacuation, schedule full GC by thread %d",
                        Thread::current()->osthread()->thread_id());

  // We ran out of memory during evacuation. Cancel evacuation, and schedule a full GC.
  collector_policy()->set_should_clear_all_soft_refs(true);
  concurrent_thread()->try_set_full_gc();
  cancel_concgc(_oom_evacuation);

  if ((! Thread::current()->is_GC_task_thread()) && (! Thread::current()->is_ConcurrentGC_thread())) {
    assert(! Threads_lock->owned_by_self()
           || SafepointSynchronize::is_at_safepoint(), "must not hold Threads_lock here");
    log_warning(gc)("OOM during evacuation. Let Java thread wait until evacuation finishes.");
    while (_evacuation_in_progress) { // wait.
      Thread::current()->_ParkEvent->park(1);
    }
  }
}

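// Each object is preceded by a one-word Brooks forwarding pointer. For a new
// allocation we therefore reserve the first word and return the address one
// word past it as the object start:
//
//   | fwd pointer | object ... |
//   ^             ^
//   obj           result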
HeapWord* ShenandoahHeap::tlab_post_allocation_setup(HeapWord* obj) {
  // Initialize Brooks pointer for the next object
  HeapWord* result = obj + BrooksPointer::word_size();
  BrooksPointer::initialize(oop(result));
  return result;
}

uint ShenandoahHeap::oop_extra_words() {
  return BrooksPointer::word_size();
}

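// Expand the heap by num_regions regions: commit the backing storage, create
// the new ShenandoahHeapRegions, and wire them into the region bookkeeping
// (ordered region set, cset fast-test, top-at-mark-start arrays, free set).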
void ShenandoahHeap::grow_heap_by(size_t num_regions) {
  size_t base = _num_regions;
  ensure_new_regions(num_regions);
  for (size_t i = 0; i < num_regions; i++) {
    size_t new_region_index = i + base;
    HeapWord* start = _first_region_bottom + (ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize) * new_region_index;
    ShenandoahHeapRegion* new_region = new ShenandoahHeapRegion(this, start, ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize, new_region_index);

    if (log_is_enabled(Trace, gc, region)) {
      ResourceMark rm;
      outputStream* out = Log(gc, region)::trace_stream();
      out->print_cr("allocating new region at index: "SIZE_FORMAT, new_region_index);
      new_region->print_on(out);
    }

    assert(_ordered_regions->active_regions() == new_region->region_number(), "must match");
    _ordered_regions->add_region(new_region);
    _in_cset_fast_test_base[new_region_index] = false; // Not in cset
    _next_top_at_mark_starts_base[new_region_index] = new_region->bottom();
    _complete_top_at_mark_starts_base[new_region_index] = new_region->bottom();

    _free_regions->add_region(new_region);
  }
}

void ShenandoahHeap::ensure_new_regions(size_t new_regions) {
  size_t num_regions = _num_regions;
  size_t new_num_regions = num_regions + new_regions;
  assert(new_num_regions <= _max_regions, "we checked this earlier");

  size_t expand_size = new_regions * ShenandoahHeapRegion::RegionSizeBytes;
  log_trace(gc, region)("expanding storage by "SIZE_FORMAT_HEX" bytes, for "SIZE_FORMAT" new regions", expand_size, new_regions);
  bool success = _storage.expand_by(expand_size, ShenandoahAlwaysPreTouch);
  assert(success, "should always be able to expand by requested size");

  _num_regions = new_num_regions;
}

ShenandoahForwardedIsAliveClosure::ShenandoahForwardedIsAliveClosure() :
  _heap(ShenandoahHeap::heap_no_check()) {
}

void ShenandoahForwardedIsAliveClosure::init(ShenandoahHeap* heap) {
  _heap = heap;
}

bool ShenandoahForwardedIsAliveClosure::do_object_b(oop obj) {
  assert(_heap != NULL, "sanity");
  obj = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
#ifdef ASSERT
  if (_heap->concurrent_mark_in_progress()) {
    assert(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)), "only query to-space");
  }
#endif
  assert(!oopDesc::is_null(obj), "null");
  return _heap->is_marked_next(obj);
}

void ShenandoahHeap::ref_processing_init() {
  MemRegion mr = reserved_region();

  isAlive.init(ShenandoahHeap::heap());
  assert(_max_workers > 0, "Sanity");

  _ref_processor =
    new ReferenceProcessor(mr,                      // span
                           ParallelRefProcEnabled,  // mt processing
                           _max_workers,            // degree of mt processing
                           true,                    // mt discovery
                           _max_workers,            // degree of mt discovery
                           false,                   // reference discovery is not atomic
                           &isAlive);               // is-alive closure
}

#ifdef ASSERT
void ShenandoahHeap::set_from_region_protection(bool protect) {
  for (size_t i = 0; i < _num_regions; i++) {
    ShenandoahHeapRegion* region = _ordered_regions->get(i);
    if (region != NULL && in_collection_set(region)) {
      if (protect) {
        region->memProtectionOn();
      } else {
        region->memProtectionOff();
      }
    }
  }
}
#endif

size_t ShenandoahHeap::num_regions() {
  return _num_regions;
}

size_t ShenandoahHeap::max_regions() {
  return _max_regions;
}

GCTracer* ShenandoahHeap::tracer() {
  return shenandoahPolicy()->tracer();
}

size_t ShenandoahHeap::tlab_used(Thread* thread) const {
  return _free_regions->used();
}

void ShenandoahHeap::cancel_concgc(GCCause::Cause cause) {
  if (try_cancel_concgc()) {
    log_info(gc)("Cancelling concurrent GC: %s", GCCause::to_string(cause));
    _shenandoah_policy->report_concgc_cancelled();
  }
}

void ShenandoahHeap::cancel_concgc(ShenandoahCancelCause cause) {
  if (try_cancel_concgc()) {
    log_info(gc)("Cancelling concurrent GC: %s", cancel_cause_to_string(cause));
    _shenandoah_policy->report_concgc_cancelled();
  }
}

const char* ShenandoahHeap::cancel_cause_to_string(ShenandoahCancelCause cause) {
  switch (cause) {
    case _oom_evacuation:
      return "Out of memory for evacuation";
    case _vm_stop:
      return "Stopping VM";
    default:
      return "Unknown";
  }
}

uint ShenandoahHeap::max_workers() {
  return _max_workers;
}

void ShenandoahHeap::stop() {
  // The shutdown sequence should be able to terminate when GC is running.

  // Step 1. Notify control thread that we are in shutdown.
  // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown.
  // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
  _concurrent_gc_thread->prepare_for_graceful_shutdown();

  // Step 2. Notify GC workers that we are cancelling GC.
  cancel_concgc(_vm_stop);

  // Step 3. Wait until GC worker exits normally.
  _concurrent_gc_thread->stop();
}

void ShenandoahHeap::unlink_string_and_symbol_table(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols) {
  StringSymbolTableUnlinkTask shenandoah_unlink_task(is_alive, process_strings, process_symbols);
  workers()->run_task(&shenandoah_unlink_task);

  // TODO: Hook up string deduplication here, along the lines of
  // G1StringDedup::unlink(is_alive).
}

void ShenandoahHeap::set_need_update_refs(bool need_update_refs) {
  _need_update_refs = need_update_refs;
}

// Returns the region after 'r' in the region sequence, skipping humongous regions.
// FIXME: This should live in the heap region set.
ShenandoahHeapRegion* ShenandoahHeap::next_compaction_region(const ShenandoahHeapRegion* r) {
  size_t region_idx = r->region_number() + 1;
  ShenandoahHeapRegion* next = _ordered_regions->get(region_idx);
  guarantee(next->region_number() == region_idx, "region number must match");
  while (next->is_humongous()) {
    region_idx = next->region_number() + 1;
    next = _ordered_regions->get(region_idx);
    guarantee(next->region_number() == region_idx, "region number must match");
  }
  return next;
}

void ShenandoahHeap::set_region_in_collection_set(size_t region_index, bool b) {
  _in_cset_fast_test_base[region_index] = b;
}

ShenandoahMonitoringSupport* ShenandoahHeap::monitoring_support() {
  return _monitoring_support;
}

CMBitMap* ShenandoahHeap::complete_mark_bit_map() {
  return _complete_mark_bit_map;
}

CMBitMap* ShenandoahHeap::next_mark_bit_map() {
  return _next_mark_bit_map;
}

void ShenandoahHeap::add_free_region(ShenandoahHeapRegion* r) {
  _free_regions->add_region(r);
}

void ShenandoahHeap::clear_free_regions() {
  _free_regions->clear();
}

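// Raw addresses of GC state, for use by generated code (e.g. the barrier
// fast paths).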
address ShenandoahHeap::in_cset_fast_test_addr() {
  return (address) (ShenandoahHeap::heap()->_in_cset_fast_test);
}

address ShenandoahHeap::cancelled_concgc_addr() {
  return (address) &(ShenandoahHeap::heap()->_cancelled_concgc);
}

void ShenandoahHeap::clear_cset_fast_test() {
  assert(_in_cset_fast_test_base != NULL, "sanity");
  memset(_in_cset_fast_test_base, false,
         _in_cset_fast_test_length * sizeof(bool));
}

size_t ShenandoahHeap::conservative_max_heap_alignment() {
  return ShenandoahMaxRegionSize;
}

size_t ShenandoahHeap::bytes_allocated_since_cm() {
  return _bytes_allocated_since_cm;
}

void ShenandoahHeap::set_bytes_allocated_since_cm(size_t bytes) {
  _bytes_allocated_since_cm = bytes;
}

size_t ShenandoahHeap::max_allocated_gc() {
  return _max_allocated_gc;
}

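// Top-at-mark-start (TAMS) accessors. Regions are size-aligned, so the
// region index is recovered from the region base address by a shift.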
void ShenandoahHeap::set_next_top_at_mark_start(HeapWord* region_base, HeapWord* addr) {
  uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::RegionSizeShift;
  _next_top_at_mark_starts[index] = addr;
}

HeapWord* ShenandoahHeap::next_top_at_mark_start(HeapWord* region_base) {
  uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::RegionSizeShift;
  return _next_top_at_mark_starts[index];
}

void ShenandoahHeap::set_complete_top_at_mark_start(HeapWord* region_base, HeapWord* addr) {
  uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::RegionSizeShift;
  _complete_top_at_mark_starts[index] = addr;
}

HeapWord* ShenandoahHeap::complete_top_at_mark_start(HeapWord* region_base) {
  uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::RegionSizeShift;
  return _complete_top_at_mark_starts[index];
}

void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
  _full_gc_in_progress = in_progress;
}

bool ShenandoahHeap::is_full_gc_in_progress() const {
  return _full_gc_in_progress;
}

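// Resolves all oops embedded in an nmethod through the write barrier and
// stores back any forwarded values, so that compiled code does not hold
// references to from-space copies.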
class NMethodOopInitializer : public OopClosure {
private:
  ShenandoahHeap* _heap;
public:
  NMethodOopInitializer() : _heap(ShenandoahHeap::heap()) {
  }

private:
  template <class T>
  inline void do_oop_work(T* p) {
    T o = oopDesc::load_heap_oop(p);
    if (! oopDesc::is_null(o)) {
      oop obj1 = oopDesc::decode_heap_oop_not_null(o);
      oop obj2 = oopDesc::bs()->write_barrier(obj1);
      if (! oopDesc::unsafe_equals(obj1, obj2)) {
        oopDesc::encode_store_heap_oop(p, obj2);
      }
    }
  }

public:
  void do_oop(oop* o) {
    do_oop_work(o);
  }
  void do_oop(narrowOop* o) {
    do_oop_work(o);
  }
};

void ShenandoahHeap::register_nmethod(nmethod* nm) {
  NMethodOopInitializer init;
  nm->oops_do(&init);
  nm->fix_oop_relocations();
}

void ShenandoahHeap::unregister_nmethod(nmethod* nm) {
}

void ShenandoahHeap::pin_object(oop o) {
  heap_region_containing(o)->pin();
}

void ShenandoahHeap::unpin_object(oop o) {
  heap_region_containing(o)->unpin();
}

GCTimer* ShenandoahHeap::gc_timer() const {
  return _gc_timer;
}

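// Diagnostic closures for calculate_matrix() below: count, for every
// reference in the heap, one entry in a regions-x-regions matrix indexed by
// the region numbers of the referencing and referenced objects.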
class RecordAllRefsOopClosure: public ExtendedOopClosure {
private:
  size_t _x;
  int *_matrix;
  size_t _num_regions;
  oop _p;

public:
  RecordAllRefsOopClosure(int *matrix, size_t x, size_t num_regions, oop p) :
    _x(x), _matrix(matrix), _num_regions(num_regions), _p(p) {}

  template <class T>
  void do_oop_work(T* p) {
    oop o = oopDesc::load_decode_heap_oop(p);
    if (o != NULL) {
      if (ShenandoahHeap::heap()->is_in(o) && o->is_oop()) {
        size_t y = ShenandoahHeap::heap()->heap_region_containing(o)->region_number();
        _matrix[_x * _num_regions + y]++;
      }
    }
  }
  void do_oop(oop* p) {
    do_oop_work(p);
  }

  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

class RecordAllRefsObjectClosure : public ObjectClosure {
  int *_matrix;
  size_t _num_regions;

public:
  RecordAllRefsObjectClosure(int *matrix, size_t num_regions) :
    _matrix(matrix), _num_regions(num_regions) {}

  void do_object(oop p) {
    if (ShenandoahHeap::heap()->is_in(p) && ShenandoahHeap::heap()->is_marked_next(p) && p->is_oop()) {
      size_t x = ShenandoahHeap::heap()->heap_region_containing(p)->region_number();
      RecordAllRefsOopClosure cl(_matrix, x, _num_regions, p);
      p->oop_iterate(&cl);
    }
  }
};

void ShenandoahHeap::calculate_matrix(int* connections) {
  log_develop_trace(gc)("calculating matrix");
  ensure_parsability(false);
  size_t num = num_regions();

  for (size_t i = 0; i < num; i++) {
    for (size_t j = 0; j < num; j++) {
      connections[i * num + j] = 0;
    }
  }

  RecordAllRefsOopClosure cl(connections, 0, num, NULL);
  roots_iterate(&cl);

  RecordAllRefsObjectClosure cl2(connections, num);
  object_iterate(&cl2);
}

void ShenandoahHeap::print_matrix(int* connections) {
  size_t num = num_regions();
  int cs_regions = 0;
  int referenced = 0;

  for (size_t i = 0; i < num; i++) {
    int numReferencedRegions = 0;
    int numReferencedByRegions = 0;

    for (size_t j = 0; j < num; j++) {
      if (connections[i * num + j] > 0)
        numReferencedRegions++;

      if (connections[j * num + i] > 0)
        numReferencedByRegions++;
    }

    // Count each region once, after its row and column have been summed up.
    cs_regions++;
    referenced += numReferencedByRegions;

    if (ShenandoahHeap::heap()->regions()->get(i)->has_live()) {
      tty->print("Region " SIZE_FORMAT " is referenced by %d regions {", i, numReferencedByRegions);
      int col_count = 0;
      for (size_t j = 0; j < num; j++) {
        int count = connections[j * num + i];
        if (count > 0) {
          col_count++;
          if ((col_count % 10) == 0)
            tty->print("\n");
          tty->print("" SIZE_FORMAT "(%d), ", j, count);
        }
      }
      tty->print("} \n");
    }
  }

  double avg = (double) referenced / (double) cs_regions;
  tty->print("Average number of regions scanned per region = %f\n", avg);
}

class ShenandoahCountGarbageClosure : public ShenandoahHeapRegionClosure {
private:
  size_t _garbage;
public:
  ShenandoahCountGarbageClosure() : _garbage(0) {
  }

  bool doHeapRegion(ShenandoahHeapRegion* r) {
    if (! r->is_humongous() && ! r->is_pinned() && ! r->in_collection_set()) {
      _garbage += r->garbage();
    }
    return false;
  }

  size_t garbage() {
    return _garbage;
  }
};

size_t ShenandoahHeap::garbage() {
  ShenandoahCountGarbageClosure cl;
  heap_region_iterate(&cl);
  return cl.garbage();
}

ShenandoahConnectionMatrix* ShenandoahHeap::connection_matrix() {
  return _connection_matrix;
}

ShenandoahPartialGC* ShenandoahHeap::partial_gc() {
  return _partial_gc;
}

void ShenandoahHeap::do_partial_collection() {
  {
    ShenandoahHeapLock lock(this);
    partial_gc()->prepare();
  }
  partial_gc()->do_partial_collection();
}

#ifdef ASSERT
void ShenandoahHeap::assert_heaplock_owned_by_current_thread() {
  assert(_heap_lock == locked, "must be locked");
  assert(_heap_lock_owner == Thread::current(), "must be owned by current thread");
}

void ShenandoahHeap::assert_heaplock_or_safepoint() {
  Thread* thr = Thread::current();
  assert((_heap_lock == locked && _heap_lock_owner == thr) ||
         (SafepointSynchronize::is_at_safepoint() && thr->is_VM_thread()),
         "must own heap lock, or be the VM thread at a safepoint");
}

#endif