/*
 * Copyright (c) 2013, 2015, Red Hat, Inc. and/or its affiliates.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.hpp"

#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/parallelCleaning.hpp"

#include "gc/shenandoah/brooksPointer.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc/shenandoah/shenandoahConcurrentThread.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahHumongous.hpp"
#include "gc/shenandoah/shenandoahMarkCompact.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahPartialGC.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.hpp"
#include "gc/shenandoah/vm_operations_shenandoah.hpp"

#include "runtime/vmThread.hpp"
#include "services/mallocTracker.hpp"

SCMUpdateRefsClosure::SCMUpdateRefsClosure() : _heap(ShenandoahHeap::heap()) {}

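// Debug-only closure: asserts that a root already points to its to-space copy,
// i.e. that resolving the oop through the read barrier is an identity operation.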
#ifdef ASSERT
template <class T>
void AssertToSpaceClosure::do_oop_nv(T* p) {
  T o = oopDesc::load_heap_oop(p);
  if (! oopDesc::is_null(o)) {
    oop obj = oopDesc::decode_heap_oop_not_null(o);
    assert(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)),
           "need to-space object here obj: " PTR_FORMAT ", rb(obj): " PTR_FORMAT ", p: " PTR_FORMAT,
           p2i(obj), p2i(ShenandoahBarrierSet::resolve_oop_static_not_null(obj)), p2i(p));
  }
}

void AssertToSpaceClosure::do_oop(narrowOop* p) { do_oop_nv(p); }
void AssertToSpaceClosure::do_oop(oop* p)       { do_oop_nv(p); }
#endif

const char* ShenandoahHeap::name() const {
  return "Shenandoah";
}

void ShenandoahHeap::print_heap_locations(HeapWord* start, HeapWord* end) {
  HeapWord* cur = NULL;
  for (cur = start; cur < end; cur++) {
    tty->print_cr(PTR_FORMAT " : " PTR_FORMAT, p2i(cur), p2i(*((HeapWord**) cur)));
  }
}

class PrintHeapRegionsClosure : public ShenandoahHeapRegionClosure {
private:
  outputStream* _st;
public:
  PrintHeapRegionsClosure() : _st(tty) {}
  PrintHeapRegionsClosure(outputStream* st) : _st(st) {}

  bool doHeapRegion(ShenandoahHeapRegion* r) {
    r->print_on(_st);
    return false;
  }
};

class ShenandoahPretouchTask : public AbstractGangTask {
private:
  ShenandoahHeapRegionSet* _regions;
  const size_t _bitmap_size;
  const size_t _page_size;
  char* _bitmap0_base;
  char* _bitmap1_base;
public:
  ShenandoahPretouchTask(ShenandoahHeapRegionSet* regions,
                         char* bitmap0_base, char* bitmap1_base, size_t bitmap_size,
                         size_t page_size) :
    AbstractGangTask("Shenandoah PreTouch",
                     Universe::is_fully_initialized() ? GCId::current_raw() :
                                                        // During VM initialization there is
                                                        // no GC cycle that this task can be
                                                        // associated with.
                                                        GCId::undefined()),
    _regions(regions),
    _bitmap_size(bitmap_size),
    _page_size(page_size),
    _bitmap0_base(bitmap0_base),
    _bitmap1_base(bitmap1_base) {
    _regions->clear_current_index();
  }

  virtual void work(uint worker_id) {
    ShenandoahHeapRegion* r = _regions->claim_next();
    while (r != NULL) {
      log_trace(gc, heap)("Pretouch region " SIZE_FORMAT ": " PTR_FORMAT " -> " PTR_FORMAT,
                          r->region_number(), p2i(r->bottom()), p2i(r->end()));
      os::pretouch_memory(r->bottom(), r->end(), _page_size);

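      // One mark bitmap byte covers CMBitMap::heap_map_factor() heap bytes, so
      // the bitmap slice backing this region is region_size_bytes() divided by
      // that factor, at an offset proportional to the region number.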
      size_t start = r->region_number()       * ShenandoahHeapRegion::region_size_bytes() / CMBitMap::heap_map_factor();
      size_t end   = (r->region_number() + 1) * ShenandoahHeapRegion::region_size_bytes() / CMBitMap::heap_map_factor();
      assert (end <= _bitmap_size, "end is sane: " SIZE_FORMAT " < " SIZE_FORMAT, end, _bitmap_size);

      log_trace(gc, heap)("Pretouch bitmap under region " SIZE_FORMAT ": " PTR_FORMAT " -> " PTR_FORMAT,
                          r->region_number(), p2i(_bitmap0_base + start), p2i(_bitmap0_base + end));
      os::pretouch_memory(_bitmap0_base + start, _bitmap0_base + end, _page_size);

      log_trace(gc, heap)("Pretouch bitmap under region " SIZE_FORMAT ": " PTR_FORMAT " -> " PTR_FORMAT,
                          r->region_number(), p2i(_bitmap1_base + start), p2i(_bitmap1_base + end));
      os::pretouch_memory(_bitmap1_base + start, _bitmap1_base + end, _page_size);

      r = _regions->claim_next();
    }
  }
};

jint ShenandoahHeap::initialize() {
  CollectedHeap::pre_initialize();

  BrooksPointer::initial_checks();

  size_t init_byte_size = collector_policy()->initial_heap_byte_size();
  size_t max_byte_size = collector_policy()->max_heap_byte_size();

  Universe::check_alignment(max_byte_size,
                            ShenandoahHeapRegion::region_size_bytes(),
                            "shenandoah heap");
  Universe::check_alignment(init_byte_size,
                            ShenandoahHeapRegion::region_size_bytes(),
                            "shenandoah heap");

  ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
                                                 Arguments::conservative_max_heap_alignment());
  initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*) (heap_rs.base() + heap_rs.size()));

  set_barrier_set(new ShenandoahBarrierSet(this));
  ReservedSpace pgc_rs = heap_rs.first_part(max_byte_size);
  _storage.initialize(pgc_rs, init_byte_size);

  _num_regions = init_byte_size / ShenandoahHeapRegion::region_size_bytes();
  _max_regions = max_byte_size / ShenandoahHeapRegion::region_size_bytes();
  _initialSize = _num_regions * ShenandoahHeapRegion::region_size_bytes();
  size_t regionSizeWords = ShenandoahHeapRegion::region_size_bytes() / HeapWordSize;
  assert(init_byte_size == _initialSize, "tautology");
  _ordered_regions = new ShenandoahHeapRegionSet(_max_regions);
  _collection_set = new ShenandoahCollectionSet(_max_regions);
  _free_regions = new ShenandoahFreeSet(_max_regions);

  // Initialize fast collection set test structure.
  _in_cset_fast_test_length = _max_regions;
  _in_cset_fast_test_base =
                   NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length, mtGC);
  _in_cset_fast_test = _in_cset_fast_test_base -
               ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::region_size_shift());
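  // The base pointer is biased so the table can be indexed directly by the
  // region index derived from a heap address, e.g. (illustrative):
  //   bool in_cset = _in_cset_fast_test[((uintx) addr) >> ShenandoahHeapRegion::region_size_shift()];
  // The top-at-mark-start arrays below are biased the same way.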

  _next_top_at_mark_starts_base =
                   NEW_C_HEAP_ARRAY(HeapWord*, _max_regions, mtGC);
  _next_top_at_mark_starts = _next_top_at_mark_starts_base -
               ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::region_size_shift());

  _complete_top_at_mark_starts_base =
                   NEW_C_HEAP_ARRAY(HeapWord*, _max_regions, mtGC);
  _complete_top_at_mark_starts = _complete_top_at_mark_starts_base -
               ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::region_size_shift());

  size_t i = 0;
  for (i = 0; i < _num_regions; i++) {
    _in_cset_fast_test_base[i] = false; // Not in cset
    HeapWord* bottom = (HeapWord*) pgc_rs.base() + regionSizeWords * i;
    _complete_top_at_mark_starts_base[i] = bottom;
    _next_top_at_mark_starts_base[i] = bottom;
  }

  {
    ShenandoahHeapLock lock(this);
    for (i = 0; i < _num_regions; i++) {
      ShenandoahHeapRegion* current = new ShenandoahHeapRegion(this, (HeapWord*) pgc_rs.base() +
                                                               regionSizeWords * i, regionSizeWords, i);
      _free_regions->add_region(current);
      _ordered_regions->add_region(current);
    }
  }
  assert(((size_t) _ordered_regions->active_regions()) == _num_regions, "sanity");
  _first_region = _ordered_regions->get(0);
  _first_region_bottom = _first_region->bottom();
  assert((((size_t) _first_region_bottom) &
          (ShenandoahHeapRegion::region_size_bytes() - 1)) == 0,
         "misaligned heap: " PTR_FORMAT, p2i(_first_region_bottom));

  _numAllocs = 0;

  if (log_is_enabled(Trace, gc, region)) {
    ResourceMark rm;
    outputStream* out = Log(gc, region)::trace_stream();
    log_trace(gc, region)("All Regions");
    _ordered_regions->print(out);
    log_trace(gc, region)("Free Regions");
    _free_regions->print(out);
  }

  // The call below uses stuff (the SATB* things) that are in G1, but probably
  // belongs in a shared location.
  JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
                                               SATB_Q_FL_lock,
                                               20 /* G1SATBProcessCompletedThreshold */,
                                               Shared_SATB_Q_lock);

  // Reserve space for the complete and next mark bitmaps.
  _bitmap_size = CMBitMap::compute_size(heap_rs.size());
  _heap_region = MemRegion((HeapWord*) heap_rs.base(), heap_rs.size() / HeapWordSize);

  size_t page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();

  ReservedSpace bitmap0(_bitmap_size, page_size);
  os::commit_memory_or_exit(bitmap0.base(), bitmap0.size(), false, "couldn't allocate mark bitmap");
  MemTracker::record_virtual_memory_type(bitmap0.base(), mtGC);
  MemRegion bitmap_region0 = MemRegion((HeapWord*) bitmap0.base(), bitmap0.size() / HeapWordSize);

  ReservedSpace bitmap1(_bitmap_size, page_size);
  os::commit_memory_or_exit(bitmap1.base(), bitmap1.size(), false, "couldn't allocate mark bitmap");
  MemTracker::record_virtual_memory_type(bitmap1.base(), mtGC);
  MemRegion bitmap_region1 = MemRegion((HeapWord*) bitmap1.base(), bitmap1.size() / HeapWordSize);

  if (ShenandoahVerify || (UseShenandoahMatrix && VerifyShenandoahMatrix)) {
    ReservedSpace verify_bitmap(_bitmap_size, page_size);
    os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), false,
                              "couldn't allocate verification bitmap");
    MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
    MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
    _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
  }

  if (ShenandoahAlwaysPreTouch) {
    assert (!AlwaysPreTouch, "Should have been overridden");

    // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
    // before initialize() below zeroes it with the initializing thread. For any given region,
    // we touch the region and the corresponding bitmaps from the same thread.

    log_info(gc, heap)("Parallel pretouch " SIZE_FORMAT " regions with " SIZE_FORMAT " byte pages",
                       _ordered_regions->count(), page_size);
    ShenandoahPretouchTask cl(_ordered_regions, bitmap0.base(), bitmap1.base(), _bitmap_size, page_size);
    _workers->run_task(&cl);
  }

  _mark_bit_map0.initialize(_heap_region, bitmap_region0);
  _complete_mark_bit_map = &_mark_bit_map0;

  _mark_bit_map1.initialize(_heap_region, bitmap_region1);
  _next_mark_bit_map = &_mark_bit_map1;

  _connection_matrix = new ShenandoahConnectionMatrix(_max_regions);
  _partial_gc = new ShenandoahPartialGC(this, _max_regions);

  _monitoring_support = new ShenandoahMonitoringSupport(this);

  _concurrent_gc_thread = new ShenandoahConcurrentThread();

  ShenandoahMarkCompact::initialize();

  return JNI_OK;
}

ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
  CollectedHeap(),
  _shenandoah_policy(policy),
  _concurrent_mark_in_progress(0),
  _evacuation_in_progress(0),
  _full_gc_in_progress(false),
  _update_refs_in_progress(false),
  _free_regions(NULL),
  _collection_set(NULL),
  _bytes_allocated_since_cm(0),
  _bytes_allocated_during_cm(0),
  _max_allocated_gc(0),
  _allocated_last_gc(0),
  _used_start_gc(0),
  _max_workers(MAX2(ConcGCThreads, ParallelGCThreads)),
  _ref_processor(NULL),
  _in_cset_fast_test(NULL),
  _in_cset_fast_test_base(NULL),
  _next_top_at_mark_starts(NULL),
  _next_top_at_mark_starts_base(NULL),
  _complete_top_at_mark_starts(NULL),
  _complete_top_at_mark_starts_base(NULL),
  _mark_bit_map0(),
  _mark_bit_map1(),
  _connection_matrix(NULL),
  _cancelled_concgc(false),
  _need_update_refs(false),
  _need_reset_bitmaps(false),
  _heap_lock(0),
#ifdef ASSERT
  _heap_lock_owner(NULL),
#endif
  _gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer())
{
  log_info(gc, init)("Parallel GC threads: " UINT32_FORMAT, ParallelGCThreads);
  log_info(gc, init)("Concurrent GC threads: " UINT32_FORMAT, ConcGCThreads);
  log_info(gc, init)("Parallel reference processing enabled: %s", BOOL_TO_STR(ParallelRefProcEnabled));

  _scm = new ShenandoahConcurrentMark();
  _used = 0;

  _max_workers = MAX2(_max_workers, 1U);
  _workers = new ShenandoahWorkGang("Shenandoah GC Threads", _max_workers,
                            /* are_GC_task_threads */ true,
                            /* are_ConcurrentGC_threads */ false);
  if (_workers == NULL) {
    vm_exit_during_initialization("Failed necessary allocation.");
  } else {
    _workers->initialize_workers();
  }
}

class ResetNextBitmapTask : public AbstractGangTask {
private:
  ShenandoahHeapRegionSet* _regions;

public:
  ResetNextBitmapTask(ShenandoahHeapRegionSet* regions) :
    AbstractGangTask("Parallel Reset Bitmap Task"),
    _regions(regions) {
    _regions->clear_current_index();
  }

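  // Only the range [bottom, next-top-at-mark-start) can contain marks from the
  // current cycle, so clearing up to that watermark is sufficient. The same
  // reasoning applies to ResetCompleteBitmapTask below.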
  void work(uint worker_id) {
    ShenandoahHeapRegion* region = _regions->claim_next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    while (region != NULL) {
      HeapWord* bottom = region->bottom();
      HeapWord* top = heap->next_top_at_mark_start(region->bottom());
      if (top > bottom) {
        heap->next_mark_bit_map()->clear_range_large(MemRegion(bottom, top));
      }
      region = _regions->claim_next();
    }
  }
};

void ShenandoahHeap::reset_next_mark_bitmap(WorkGang* workers) {
  ResetNextBitmapTask task = ResetNextBitmapTask(_ordered_regions);
  workers->run_task(&task);
}

class ResetCompleteBitmapTask : public AbstractGangTask {
private:
  ShenandoahHeapRegionSet* _regions;

public:
  ResetCompleteBitmapTask(ShenandoahHeapRegionSet* regions) :
    AbstractGangTask("Parallel Reset Bitmap Task"),
    _regions(regions) {
    _regions->clear_current_index();
  }

  void work(uint worker_id) {
    ShenandoahHeapRegion* region = _regions->claim_next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    while (region != NULL) {
      HeapWord* bottom = region->bottom();
      HeapWord* top = heap->complete_top_at_mark_start(region->bottom());
      if (top > bottom) {
        heap->complete_mark_bit_map()->clear_range_large(MemRegion(bottom, top));
      }
      region = _regions->claim_next();
    }
  }
};

void ShenandoahHeap::reset_complete_mark_bitmap(WorkGang* workers) {
  ResetCompleteBitmapTask task = ResetCompleteBitmapTask(_ordered_regions);
  workers->run_task(&task);
}

bool ShenandoahHeap::is_next_bitmap_clear() {
  HeapWord* start = _ordered_regions->bottom();
  HeapWord* end = _ordered_regions->end();
  return _next_mark_bit_map->getNextMarkedWordAddress(start, end) == end;
}

bool ShenandoahHeap::is_complete_bitmap_clear_range(HeapWord* start, HeapWord* end) {
  return _complete_mark_bit_map->getNextMarkedWordAddress(start, end) == end;
}

void ShenandoahHeap::print_on(outputStream* st) const {
  st->print("Shenandoah Heap");
  st->print(" total = " SIZE_FORMAT " K, used " SIZE_FORMAT " K ", capacity() / K, used() / K);
  st->print(" [" PTR_FORMAT ", " PTR_FORMAT ") ",
            p2i(reserved_region().start()),
            p2i(reserved_region().end()));
  st->print("Region size = " SIZE_FORMAT "K ", ShenandoahHeapRegion::region_size_bytes() / K);
  if (_concurrent_mark_in_progress) {
    st->print("marking ");
  }
  if (_evacuation_in_progress) {
    st->print("evacuating ");
  }
  if (cancelled_concgc()) {
    st->print("cancelled ");
  }
  st->print("\n");

  // Adapted from VirtualSpace::print_on(), which is non-PRODUCT only
  st->print("Virtual space:");
  if (_storage.special()) st->print(" (pinned in memory)");
  st->cr();
  st->print_cr(" - committed: " SIZE_FORMAT, _storage.committed_size());
  st->print_cr(" - reserved:  " SIZE_FORMAT, _storage.reserved_size());
  st->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  p2i(_storage.low()), p2i(_storage.high()));
  st->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  p2i(_storage.low_boundary()), p2i(_storage.high_boundary()));

  if (Verbose) {
    print_heap_regions(st);
  }
}

class InitGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    thread->gclab().initialize(true);
  }
};

void ShenandoahHeap::post_initialize() {
  if (UseTLAB) {
    // This is a very tricky point in VM lifetime. We cannot easily call Threads::threads_do
    // here, because some system threads (VMThread, WatcherThread, etc) are not yet available.
    // Their initialization is handled separately. If we miss some threads here,
    // any later TLAB-related activity would fail with asserts.

    InitGCLABClosure init_gclabs;
    {
      MutexLocker ml(Threads_lock);
      for (JavaThread *thread = Threads::first(); thread != NULL; thread = thread->next()) {
        init_gclabs.do_thread(thread);
      }
    }
    gc_threads_do(&init_gclabs);

    // GCLABs cannot be initialized early during VM startup, because max_size cannot be
    // determined yet. Instead, let the WorkGang initialize the GCLAB when a new worker
    // is created.
    _workers->set_initialize_gclab();
  }

  _scm->initialize(_max_workers);

  ref_processing_init();
}

class CalculateUsedRegionClosure : public ShenandoahHeapRegionClosure {
  size_t sum;
public:
  CalculateUsedRegionClosure() : sum(0) {}

  bool doHeapRegion(ShenandoahHeapRegion* r) {
    sum = sum + r->used();
    return false;
  }

  size_t getResult() { return sum; }
};

size_t ShenandoahHeap::calculateUsed() {
  CalculateUsedRegionClosure cl;
  heap_region_iterate(&cl);
  return cl.getResult();
}

void ShenandoahHeap::verify_heap_size_consistency() {
  assert(calculateUsed() == used(),
         "heap used size must be consistent: heap-used: " SIZE_FORMAT ", regions-used: " SIZE_FORMAT,
         used(), calculateUsed());
}

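// Note: _used is read without the heap lock; the writers below assert that
// they hold the heap lock or are at a safepoint.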
size_t ShenandoahHeap::used() const {
  OrderAccess::acquire();
  return _used;
}

void ShenandoahHeap::increase_used(size_t bytes) {
  assert_heaplock_or_safepoint();
  _used += bytes;
}

void ShenandoahHeap::set_used(size_t bytes) {
  assert_heaplock_or_safepoint();
  _used = bytes;
}

void ShenandoahHeap::decrease_used(size_t bytes) {
  assert_heaplock_or_safepoint();
  assert(_used >= bytes, "never decrease heap size by more than we've left");
  _used -= bytes;
}

size_t ShenandoahHeap::capacity() const {
  return _num_regions * ShenandoahHeapRegion::region_size_bytes();
}

bool ShenandoahHeap::is_maximal_no_gc() const {
  Unimplemented();
  return true;
}

size_t ShenandoahHeap::max_capacity() const {
  return _max_regions * ShenandoahHeapRegion::region_size_bytes();
}

size_t ShenandoahHeap::min_capacity() const {
  return _initialSize;
}

VirtualSpace* ShenandoahHeap::storage() const {
  return (VirtualSpace*) &_storage;
}

bool ShenandoahHeap::is_in(const void* p) const {
  HeapWord* first_region_bottom = _first_region->bottom();
  HeapWord* last_region_end = first_region_bottom + (ShenandoahHeapRegion::region_size_bytes() / HeapWordSize) * _num_regions;
  return p >= first_region_bottom && p < last_region_end;
}

bool ShenandoahHeap::is_scavengable(const void* p) {
  return true;
}

HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
  // Retain tlab and allocate object in shared space if
  // the amount free in the tlab is too large to discard.
  if (thread->gclab().free() > thread->gclab().refill_waste_limit()) {
    thread->gclab().record_slow_allocation(size);
    return NULL;
  }

  // Discard gclab and allocate a new one.
  // To minimize fragmentation, the last GCLAB may be smaller than the rest.
  size_t new_gclab_size = thread->gclab().compute_size(size);

  thread->gclab().clear_before_allocation();

  if (new_gclab_size == 0) {
    return NULL;
  }

  // Allocate a new GCLAB...
  HeapWord* obj = allocate_new_gclab(new_gclab_size);
  if (obj == NULL) {
    return NULL;
  }

  if (ZeroTLAB) {
    // ..and clear it.
    Copy::zero_to_words(obj, new_gclab_size);
  } else {
    // ...and zap just allocated object.
#ifdef ASSERT
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(obj + hdr_size, new_gclab_size - hdr_size, badHeapWordVal);
#endif // ASSERT
  }
  thread->gclab().fill(obj, obj + size, new_gclab_size);
  return obj;
}

HeapWord* ShenandoahHeap::allocate_new_tlab(size_t word_size) {
  return allocate_new_tlab(word_size, false);
}

HeapWord* ShenandoahHeap::allocate_new_gclab(size_t word_size) {
  return allocate_new_tlab(word_size, true);
}

HeapWord* ShenandoahHeap::allocate_new_tlab(size_t word_size, bool evacuating) {
  HeapWord* result = allocate_memory(word_size, evacuating);

  if (result != NULL) {
    assert(! in_collection_set(result), "Never allocate in dirty region");
    _bytes_allocated_since_cm += word_size * HeapWordSize;

    log_develop_trace(gc, tlab)("allocating new tlab of size " SIZE_FORMAT " at addr " PTR_FORMAT, word_size, p2i(result));
  }
  return result;
}

ShenandoahHeap* ShenandoahHeap::heap() {
  CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to ShenandoahHeap::heap()");
  assert(heap->kind() == CollectedHeap::ShenandoahHeap, "not a shenandoah heap");
  return (ShenandoahHeap*) heap;
}

ShenandoahHeap* ShenandoahHeap::heap_no_check() {
  CollectedHeap* heap = Universe::heap();
  return (ShenandoahHeap*) heap;
}

HeapWord* ShenandoahHeap::allocate_memory_work(size_t word_size) {
  ShenandoahHeapLock heap_lock(this);

  HeapWord* result = allocate_memory_under_lock(word_size);
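  // Round the request up to whole regions; if the first attempt failed, grow
  // the heap by that many regions and retry until the allocation succeeds or
  // the heap would exceed _max_regions.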
  size_t grow_by = (word_size * HeapWordSize + ShenandoahHeapRegion::region_size_bytes() - 1) / ShenandoahHeapRegion::region_size_bytes();

  while (result == NULL && _num_regions + grow_by <= _max_regions) {
    grow_heap_by(grow_by);
    result = allocate_memory_under_lock(word_size);
  }

  return result;
}

HeapWord* ShenandoahHeap::allocate_memory(size_t word_size, bool evacuating) {
  HeapWord* result = NULL;
  result = allocate_memory_work(word_size);

  if (!evacuating) {
    // Allocation failed, try full-GC, then retry allocation.
    //
    // It might happen that one of the threads requesting allocation would unblock
    // way later after full-GC happened, only to fail the second allocation, because
    // other threads have already depleted the free storage. In this case, a better
    // strategy would be to try full-GC again.
    //
    // Lacking a way to detect progress from the "collect" call, we are left with blindly
    // retrying for some bounded number of times.
    // TODO: Poll if Full GC made enough progress to warrant retry.
    int tries = 0;
    while ((result == NULL) && (tries++ < ShenandoahFullGCTries)) {
      log_debug(gc)("[" PTR_FORMAT "] Failed to allocate " SIZE_FORMAT " bytes, doing full GC, try %d",
                    p2i(Thread::current()), word_size * HeapWordSize, tries);
      collect(GCCause::_allocation_failure);
      result = allocate_memory_work(word_size);
    }
  }

  // Only update monitoring counters when not calling from a write-barrier.
  // Otherwise we might attempt to grab the Service_lock, which we must
  // not do when coming from a write-barrier (because the thread might
  // already hold the Compile_lock).
  if (! evacuating) {
    monitoring_support()->update_counters();
  }

  log_develop_trace(gc, alloc)("allocate memory chunk of size " SIZE_FORMAT " at addr " PTR_FORMAT " by thread %d ",
                               word_size, p2i(result), Thread::current()->osthread()->thread_id());

  return result;
}

bool ShenandoahHeap::call_from_write_barrier(bool evacuating) {
  return evacuating && Thread::current()->is_Java_thread();
}

HeapWord* ShenandoahHeap::allocate_memory_under_lock(size_t word_size) {
  assert_heaplock_owned_by_current_thread();

  if (word_size * HeapWordSize > ShenandoahHeapRegion::region_size_bytes()) {
    return allocate_large_memory(word_size);
  }

  // Not enough memory in free region set.
  // Coming out of full GC, it is possible that there is no
  // free region available, so current_index may not be valid.
  if (word_size * HeapWordSize > _free_regions->capacity()) return NULL;

  ShenandoahHeapRegion* my_current_region = _free_regions->current_no_humongous();

  if (my_current_region == NULL) {
    return NULL; // No more room to make a new region. OOM.
  }

#ifdef ASSERT
  if (in_collection_set(my_current_region)) {
    print_heap_regions();
  }
#endif
  assert(! in_collection_set(my_current_region), "never get targeted regions in free-lists");
  assert(! my_current_region->is_humongous(), "never attempt to allocate from humongous object regions");

  HeapWord* result = my_current_region->allocate(word_size);

  while (result == NULL) {
    // 2nd attempt. Try next region.
    _free_regions->increase_used(my_current_region->free());
    ShenandoahHeapRegion* next_region = _free_regions->next_no_humongous();
    assert(next_region != my_current_region, "must not get current again");
    my_current_region = next_region;

    if (my_current_region == NULL) {
      return NULL; // No more room to make a new region. OOM.
    }
    assert(! in_collection_set(my_current_region), "never get targeted regions in free-lists");
    assert(! my_current_region->is_humongous(), "never attempt to allocate from humongous object regions");
    result = my_current_region->allocate(word_size);
  }

  my_current_region->increase_live_data_words(word_size);
  increase_used(word_size * HeapWordSize);
  _free_regions->increase_used(word_size * HeapWordSize);
  return result;
}

HeapWord* ShenandoahHeap::allocate_large_memory(size_t words) {
  assert_heaplock_owned_by_current_thread();

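  // A humongous allocation spans one or more whole regions;
  // required_regions() rounds the request up to region granularity.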
  size_t required_regions = ShenandoahHumongous::required_regions(words * HeapWordSize);
  if (required_regions > _max_regions) return NULL;

  ShenandoahHeapRegion* r = _free_regions->allocate_contiguous(required_regions);

  HeapWord* result = NULL;

  if (r != NULL)  {
    result = r->bottom();

    log_debug(gc, humongous)("allocating humongous object of size: " SIZE_FORMAT " KB at location " PTR_FORMAT " in start region " SIZE_FORMAT,
                             (words * HeapWordSize) / K, p2i(result), r->region_number());
  } else {
    log_debug(gc, humongous)("allocating humongous object of size: " SIZE_FORMAT " KB at location " PTR_FORMAT " failed",
                             (words * HeapWordSize) / K, p2i(result));
  }

  return result;
}

HeapWord* ShenandoahHeap::mem_allocate(size_t size,
                                       bool* gc_overhead_limit_was_exceeded) {

#ifdef ASSERT
  if (ShenandoahVerify && _numAllocs > 1000000) {
    _numAllocs = 0;
  }
  _numAllocs++;
#endif
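  // Each object is preceded by a Brooks forwarding pointer word: allocate
  // size + BrooksPointer::word_size() words, and return the address just past
  // the forwarding word as the object start.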
  HeapWord* filler = allocate_memory(size + BrooksPointer::word_size(), false);
  if (filler != NULL) {
    HeapWord* result = filler + BrooksPointer::word_size();
    BrooksPointer::initialize(oop(result));
    _bytes_allocated_since_cm += size * HeapWordSize;

    assert(! in_collection_set(result), "never allocate in targeted region");
    return result;
  } else {
    /*
    tty->print_cr("Out of memory. Requested number of words: " SIZE_FORMAT " used heap: " INT64_FORMAT ", bytes allocated since last CM: " INT64_FORMAT,
                  size, used(), _bytes_allocated_since_cm);
    {
      print_heap_regions();
      tty->print("Printing " SIZE_FORMAT " free regions:\n", _free_regions->count());
      _free_regions->print();
    }
    */
    return NULL;
  }
}

class ParallelEvacuateRegionObjectClosure : public ObjectClosure {
private:
  ShenandoahHeap* _heap;
  Thread* _thread;
public:
  ParallelEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
    _heap(heap), _thread(Thread::current()) {
  }

  void do_object(oop p) {
    log_develop_trace(gc, compaction)("Calling ParallelEvacuateRegionObjectClosure on " PTR_FORMAT " of size %d", p2i((HeapWord*) p), p->size());

    assert(_heap->is_marked_complete(p), "expect only marked objects");
    if (oopDesc::unsafe_equals(p, ShenandoahBarrierSet::resolve_oop_static_not_null(p))) {
      bool evac;
      _heap->evacuate_object(p, _thread, evac);
    }
  }
};

#ifdef ASSERT
class VerifyEvacuatedObjectClosure : public ObjectClosure {
public:
  void do_object(oop p) {
    if (ShenandoahHeap::heap()->is_marked_complete(p)) {
      oop p_prime = oopDesc::bs()->read_barrier(p);
      assert(! oopDesc::unsafe_equals(p, p_prime), "Should point to evacuated copy");
      if (p->klass() != p_prime->klass()) {
        tty->print_cr("copy has different class than original:");
        p->klass()->print_on(tty);
        p_prime->klass()->print_on(tty);
      }
      assert(p->klass() == p_prime->klass(), "Should have the same class p: " PTR_FORMAT ", p_prime: " PTR_FORMAT, p2i(p), p2i(p_prime));
      //      assert(p->mark() == p_prime->mark(), "Should have the same mark");
      assert(p->size() == p_prime->size(), "Should be the same size");
      assert(oopDesc::unsafe_equals(p_prime, oopDesc::bs()->read_barrier(p_prime)), "One forward once");
    }
  }
};

void ShenandoahHeap::verify_evacuated_region(ShenandoahHeapRegion* from_region) {
  VerifyEvacuatedObjectClosure verify_evacuation;
  marked_object_iterate(from_region, &verify_evacuation);
}
#endif

void ShenandoahHeap::parallel_evacuate_region(ShenandoahHeapRegion* from_region) {
  assert(from_region->has_live(), "all-garbage regions are reclaimed earlier");

  ParallelEvacuateRegionObjectClosure evacuate_region(this);

  marked_object_iterate(from_region, &evacuate_region);

#ifdef ASSERT
  if (ShenandoahVerify && ! cancelled_concgc()) {
    verify_evacuated_region(from_region);
  }
#endif
}

class ParallelEvacuationTask : public AbstractGangTask {
private:
  ShenandoahHeap* _sh;
  ShenandoahCollectionSet* _cs;

public:
  ParallelEvacuationTask(ShenandoahHeap* sh,
                         ShenandoahCollectionSet* cs) :
    AbstractGangTask("Parallel Evacuation Task"),
    _sh(sh),
    _cs(cs) {}

  void work(uint worker_id) {
    ShenandoahHeapRegion* from_hr = _cs->claim_next();

    while (from_hr != NULL) {
      log_develop_trace(gc, region)("Thread " UINT32_FORMAT " claimed Heap Region " SIZE_FORMAT,
                                    worker_id,
                                    from_hr->region_number());

      assert(from_hr->has_live(), "all-garbage regions are reclaimed early");
      _sh->parallel_evacuate_region(from_hr);

      if (_sh->cancelled_concgc()) {
        log_develop_trace(gc, region)("Cancelled concgc while evacuating region " SIZE_FORMAT, from_hr->region_number());
        break;
      }
      from_hr = _cs->claim_next();
    }
  }
};

class RecycleDirtyRegionsClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* _heap;
  size_t _bytes_reclaimed;
public:
  RecycleDirtyRegionsClosure() : _heap(ShenandoahHeap::heap()), _bytes_reclaimed(0) {}

  bool doHeapRegion(ShenandoahHeapRegion* r) {
    assert (! _heap->cancelled_concgc(), "no recycling after cancelled marking");

    if (_heap->in_collection_set(r)) {
      log_develop_trace(gc, region)("Recycling region " SIZE_FORMAT ":", r->region_number());
      _heap->decrease_used(r->used());
      _bytes_reclaimed += r->used();
      r->recycle();
    }

    return false;
  }
  size_t bytes_reclaimed() { return _bytes_reclaimed; }
  void clear_bytes_reclaimed() { _bytes_reclaimed = 0; }
};

void ShenandoahHeap::recycle_dirty_regions() {
  RecycleDirtyRegionsClosure cl;
  cl.clear_bytes_reclaimed();

  heap_region_iterate(&cl);

  _shenandoah_policy->record_bytes_reclaimed(cl.bytes_reclaimed());
  if (! cancelled_concgc()) {
    clear_cset_fast_test();
  }
}

ShenandoahFreeSet* ShenandoahHeap::free_regions() {
  return _free_regions;
}

void ShenandoahHeap::print_heap_regions(outputStream* st) const {
  _ordered_regions->print(st);
}

class PrintAllRefsOopClosure: public ExtendedOopClosure {
private:
  int _index;
  const char* _prefix;

public:
  PrintAllRefsOopClosure(const char* prefix) : _index(0), _prefix(prefix) {}

private:
  template <class T>
  inline void do_oop_work(T* p) {
    oop o = oopDesc::load_decode_heap_oop(p);
    if (o != NULL) {
      if (ShenandoahHeap::heap()->is_in(o) && o->is_oop()) {
        tty->print_cr("%s " INT32_FORMAT " (" PTR_FORMAT ") -> " PTR_FORMAT " (marked: %s) (%s " PTR_FORMAT ")",
                      _prefix, _index,
                      p2i(p), p2i(o),
                      BOOL_TO_STR(ShenandoahHeap::heap()->is_marked_complete(o)),
                      o->klass()->internal_name(), p2i(o->klass()));
      } else {
        tty->print_cr("%s " INT32_FORMAT " (" PTR_FORMAT ") dirty -> " PTR_FORMAT " (not in heap, possibly corrupted or dirty)",
                      _prefix, _index,
                      p2i(p), p2i(o));
      }
    } else {
      tty->print_cr("%s " INT32_FORMAT " (" PTR_FORMAT ") -> " PTR_FORMAT, _prefix, _index, p2i(p), p2i((HeapWord*) o));
    }
    _index++;
  }

public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }

  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

class PrintAllRefsObjectClosure : public ObjectClosure {
  const char* _prefix;

public:
  PrintAllRefsObjectClosure(const char* prefix) : _prefix(prefix) {}

  void do_object(oop p) {
    if (ShenandoahHeap::heap()->is_in(p)) {
      tty->print_cr("%s object " PTR_FORMAT " (marked: %s) (%s " PTR_FORMAT ") refers to:",
                    _prefix, p2i(p),
                    BOOL_TO_STR(ShenandoahHeap::heap()->is_marked_complete(p)),
                    p->klass()->internal_name(), p2i(p->klass()));
      PrintAllRefsOopClosure cl(_prefix);
      p->oop_iterate(&cl);
    }
  }
};

void ShenandoahHeap::print_all_refs(const char* prefix) {
  tty->print_cr("printing all references in the heap");
  tty->print_cr("root references:");

  ensure_parsability(false);

  PrintAllRefsOopClosure cl(prefix);
  roots_iterate(&cl);

  tty->print_cr("heap references:");
  PrintAllRefsObjectClosure cl2(prefix);
  object_iterate(&cl2);
}

class VerifyAfterMarkingOopClosure: public ExtendedOopClosure {
private:
  ShenandoahHeap*  _heap;

public:
  VerifyAfterMarkingOopClosure() :
    _heap(ShenandoahHeap::heap()) { }

private:
  template <class T>
  inline void do_oop_work(T* p) {
    oop o = oopDesc::load_decode_heap_oop(p);
    if (o != NULL) {
      if (! _heap->is_marked_complete(o)) {
        _heap->print_heap_regions();
        _heap->print_all_refs("post-mark");
        tty->print_cr("oop not marked, although referrer is marked: " PTR_FORMAT ": in_heap: %s, is_marked: %s",
                      p2i((HeapWord*) o), BOOL_TO_STR(_heap->is_in(o)), BOOL_TO_STR(_heap->is_marked_complete(o)));
        _heap->print_heap_locations((HeapWord*) o, (HeapWord*) o + o->size());

        tty->print_cr("oop class: %s", o->klass()->internal_name());
        if (_heap->is_in(p)) {
          oop referrer = oop(_heap->heap_region_containing(p)->block_start_const(p));
          tty->print_cr("Referrer starts at addr " PTR_FORMAT, p2i((HeapWord*) referrer));
          referrer->print();
          _heap->print_heap_locations((HeapWord*) referrer, (HeapWord*) referrer + referrer->size());
        }
        tty->print_cr("heap region containing object:");
        _heap->heap_region_containing(o)->print();
        tty->print_cr("heap region containing referrer:");
        _heap->heap_region_containing(p)->print();
        tty->print_cr("heap region containing forwardee:");
        _heap->heap_region_containing(oopDesc::bs()->read_barrier(o))->print();
      }
      assert(o->is_oop(), "oop must be an oop");
      assert(Metaspace::contains(o->klass()), "klass pointer must go to metaspace");
      if (! oopDesc::unsafe_equals(o, oopDesc::bs()->read_barrier(o))) {
        tty->print_cr("oop has forwardee: p: " PTR_FORMAT " (%s), o = " PTR_FORMAT " (%s), new-o: " PTR_FORMAT " (%s)",
                      p2i(p),
                      BOOL_TO_STR(_heap->in_collection_set(p)),
                      p2i(o),
                      BOOL_TO_STR(_heap->in_collection_set(o)),
                      p2i((HeapWord*) oopDesc::bs()->read_barrier(o)),
                      BOOL_TO_STR(_heap->in_collection_set(oopDesc::bs()->read_barrier(o))));
        tty->print_cr("oop class: %s", o->klass()->internal_name());
      }
      assert(oopDesc::unsafe_equals(o, oopDesc::bs()->read_barrier(o)), "oops must not be forwarded");
      assert(! _heap->in_collection_set(o), "references must not point to dirty heap regions");
      assert(_heap->is_marked_complete(o), "live oops must be marked current");
    }
  }

public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }

  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

void ShenandoahHeap::verify_heap_after_marking() {
  verify_heap_size_consistency();

  log_trace(gc)("verifying heap after marking");

  VerifyAfterMarkingOopClosure cl;
  roots_iterate(&cl);
  ObjectToOopClosure objs(&cl);
  object_iterate(&objs);
}

void ShenandoahHeap::reclaim_humongous_region_at(ShenandoahHeapRegion* r) {
  assert(r->is_humongous_start(), "reclaim regions starting with the first one");

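  // The object starts one Brooks word past the region bottom; the reclaimed
  // footprint is the object plus its forwarding word, rounded up to whole regions.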
  oop humongous_obj = oop(r->bottom() + BrooksPointer::word_size());
  size_t size = humongous_obj->size() + BrooksPointer::word_size();
  size_t required_regions = ShenandoahHumongous::required_regions(size * HeapWordSize);
  size_t index = r->region_number();

  assert(!r->has_live(), "liveness must be zero");

  for (size_t i = 0; i < required_regions; i++) {
    ShenandoahHeapRegion* region = _ordered_regions->get(index++);

    assert((region->is_humongous_start() || region->is_humongous_continuation()),
           "expect correct humongous start or continuation");

    if (log_is_enabled(Debug, gc, humongous)) {
      log_debug(gc, humongous)("reclaiming " SIZE_FORMAT " humongous regions for object of size: " SIZE_FORMAT " words", required_regions, size);
      ResourceMark rm;
      outputStream* out = Log(gc, humongous)::debug_stream();
      region->print_on(out);
    }

    region->recycle();
    ShenandoahHeap::heap()->decrease_used(ShenandoahHeapRegion::region_size_bytes());
  }
}

class ShenandoahReclaimHumongousRegionsClosure : public ShenandoahHeapRegionClosure {
  bool doHeapRegion(ShenandoahHeapRegion* r) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();

    if (r->is_humongous_start()) {
      oop humongous_obj = oop(r->bottom() + BrooksPointer::word_size());
      if (! heap->is_marked_complete(humongous_obj)) {
        heap->reclaim_humongous_region_at(r);
      }
    }
    return false;
  }
};

#ifdef ASSERT
class CheckCollectionSetClosure: public ShenandoahHeapRegionClosure {
  bool doHeapRegion(ShenandoahHeapRegion* r) {
    assert(! ShenandoahHeap::heap()->in_collection_set(r), "Should have been cleared by now");
    return false;
  }
};
#endif

void ShenandoahHeap::prepare_for_concurrent_evacuation() {
  assert(_ordered_regions->get(0)->region_number() == 0, "FIXME CHF. FIXME CHF!");

  log_develop_trace(gc)("Thread %d started prepare_for_concurrent_evacuation", Thread::current()->osthread()->thread_id());

  if (!cancelled_concgc()) {
    recycle_dirty_regions();

    ensure_parsability(true);

    if (UseShenandoahMatrix && PrintShenandoahMatrix) {
      outputStream* log = Log(gc)::info_stream();
      connection_matrix()->print_on(log);
    }

    if (ShenandoahVerify || (UseShenandoahMatrix && VerifyShenandoahMatrix)) {
      verify_heap_reachable_at_safepoint();
    }

#ifdef ASSERT
    if (ShenandoahVerify) {
      verify_heap_after_marking();
    }
#endif

    // NOTE: This needs to be done during a stop the world pause, because
    // putting regions into the collection set concurrently with Java threads
    // will create a race. In particular, acmp could fail because when we
    // resolve the first operand, the containing region might not yet be in
    // the collection set, and thus return the original oop. When the 2nd
    // operand gets resolved, the region could be in the collection set
    // and the oop gets evacuated. If both operands have originally been
    // the same, we get false negatives.

    {
      ShenandoahHeapLock lock(this);
      _collection_set->clear();
      _free_regions->clear();

      ShenandoahReclaimHumongousRegionsClosure reclaim;
      heap_region_iterate(&reclaim);

#ifdef ASSERT
      CheckCollectionSetClosure ccsc;
      _ordered_regions->heap_region_iterate(&ccsc);
#endif

      _shenandoah_policy->choose_collection_set(_collection_set);

      _shenandoah_policy->choose_free_set(_free_regions);
    }

    _bytes_allocated_since_cm = 0;

    Universe::update_heap_info_at_gc();
  }
}

class RetireTLABClosure : public ThreadClosure {
private:
  bool _retire;

public:
  RetireTLABClosure(bool retire) : _retire(retire) {
  }

  void do_thread(Thread* thread) {
    thread->gclab().make_parsable(_retire);
  }
};

void ShenandoahHeap::ensure_parsability(bool retire_tlabs) {
  if (UseTLAB) {
    CollectedHeap::ensure_parsability(retire_tlabs);
    RetireTLABClosure cl(retire_tlabs);
    Threads::threads_do(&cl);
  }
}

class ShenandoahEvacuateUpdateRootsClosure: public ExtendedOopClosure {
private:
  ShenandoahHeap* _heap;
  Thread* _thread;
public:
  ShenandoahEvacuateUpdateRootsClosure() :
    _heap(ShenandoahHeap::heap()), _thread(Thread::current()) {
  }

private:
  template <class T>
  void do_oop_work(T* p) {
    assert(_heap->is_evacuation_in_progress(), "Only do this when evacuation is in progress");

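    // If the referent is in the collection set and not yet forwarded, evacuate
    // it now; either way, store the to-space copy back into the root slot.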
    T o = oopDesc::load_heap_oop(p);
    if (! oopDesc::is_null(o)) {
      oop obj = oopDesc::decode_heap_oop_not_null(o);
      if (_heap->in_collection_set(obj)) {
        assert(_heap->is_marked_complete(obj), "only evacuate marked objects %d %d",
               _heap->is_marked_complete(obj), _heap->is_marked_complete(ShenandoahBarrierSet::resolve_oop_static_not_null(obj)));
        oop resolved = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
        if (oopDesc::unsafe_equals(resolved, obj)) {
          bool evac;
          resolved = _heap->evacuate_object(obj, _thread, evac);
        }
        oopDesc::encode_store_heap_oop(p, resolved);
      }
    }
#ifdef ASSERT
    else {
      // tty->print_cr("not updating root at: " PTR_FORMAT " with object: " PTR_FORMAT ", is_in_heap: %s, is_in_cset: %s, is_marked: %s",
      //               p2i(p),
      //               p2i((HeapWord*) obj),
      //               BOOL_TO_STR(_heap->is_in(obj)),
      //               BOOL_TO_STR(_heap->in_cset_fast_test(obj)),
      //               BOOL_TO_STR(_heap->is_marked_complete(obj)));
    }
#endif
  }

public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }
  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

class ShenandoahEvacuateUpdateRootsTask : public AbstractGangTask {
  ShenandoahRootEvacuator* _rp;
public:
  ShenandoahEvacuateUpdateRootsTask(ShenandoahRootEvacuator* rp) :
    AbstractGangTask("Shenandoah evacuate and update roots"),
    _rp(rp)
  {
    // Nothing else to do.
  }

  void work(uint worker_id) {
    ShenandoahEvacuateUpdateRootsClosure cl;
    MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);

    _rp->process_evacuate_roots(&cl, &blobsCl, worker_id);
  }
};

class ShenandoahFixRootsTask : public AbstractGangTask {
  ShenandoahRootEvacuator* _rp;
public:
  ShenandoahFixRootsTask(ShenandoahRootEvacuator* rp) :
    AbstractGangTask("Shenandoah update roots"),
    _rp(rp)
  {
    // Nothing else to do.
  }

  void work(uint worker_id) {
    SCMUpdateRefsClosure cl;
    MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);

    _rp->process_evacuate_roots(&cl, &blobsCl, worker_id);
  }
};

void ShenandoahHeap::evacuate_and_update_roots() {
  COMPILER2_PRESENT(DerivedPointerTable::clear());

#ifdef ASSERT
  if (ShenandoahVerifyReadsToFromSpace) {
    set_from_region_protection(false);
  }
#endif

  assert(SafepointSynchronize::is_at_safepoint(), "Only iterate roots while world is stopped");
  ClassLoaderDataGraph::clear_claimed_marks();

  {
    ShenandoahRootEvacuator rp(this, workers()->active_workers(), ShenandoahCollectorPolicy::evac_thread_roots);
    ShenandoahEvacuateUpdateRootsTask roots_task(&rp);
    workers()->run_task(&roots_task);
  }

  COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

  if (cancelled_concgc()) {
    // If initial evacuation has been cancelled, we need to update all references
    // after all workers have finished. Otherwise we might run into the following problem:
    // GC thread 1 cannot allocate anymore, thus evacuation fails, leaves from-space ptr of object X.
    // GC thread 2 evacuates the same object X to to-space
    // which leaves a truly dangling from-space reference in the first root oop*. This must not happen.
    // clear() and update_pointers() must always be called in pairs,
    // cannot nest with above clear()/update_pointers().
    COMPILER2_PRESENT(DerivedPointerTable::clear());
    ShenandoahRootEvacuator rp(this, workers()->active_workers(), ShenandoahCollectorPolicy::evac_thread_roots);
    ShenandoahFixRootsTask update_roots_task(&rp);
    workers()->run_task(&update_roots_task);
    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
  }

#ifdef ASSERT
  if (ShenandoahVerifyReadsToFromSpace) {
    set_from_region_protection(true);
  }
#endif

#ifdef ASSERT
  {
    AssertToSpaceClosure cl;
    CodeBlobToOopClosure code_cl(&cl, !CodeBlobToOopClosure::FixRelocations);
    ShenandoahRootEvacuator rp(this, 1);
    rp.process_evacuate_roots(&cl, &code_cl, 0);
  }
#endif
}

void ShenandoahHeap::do_evacuation() {
  parallel_evacuate();

  if (ShenandoahVerify && ! cancelled_concgc()) {
    VM_ShenandoahVerifyHeapAfterEvacuation verify_after_evacuation;
    if (Thread::current()->is_VM_thread()) {
      verify_after_evacuation.doit();
    } else {
      VMThread::execute(&verify_after_evacuation);
    }
  }
}

void ShenandoahHeap::parallel_evacuate() {
  log_develop_trace(gc)("starting parallel_evacuate");

  _shenandoah_policy->record_phase_start(ShenandoahCollectorPolicy::conc_evac);

  if (log_is_enabled(Trace, gc, region)) {
    ResourceMark rm;
    outputStream* out = Log(gc, region)::trace_stream();
    out->print_cr("Printing all available regions");
    print_heap_regions(out);
  }

  if (log_is_enabled(Trace, gc, cset)) {
    ResourceMark rm;
    outputStream* out = Log(gc, cset)::trace_stream();
    out->print_cr("Printing collection set which contains " SIZE_FORMAT " regions:", _collection_set->count());
    _collection_set->print(out);

    out->print_cr("Printing free set which contains " SIZE_FORMAT " regions:", _free_regions->count());
    _free_regions->print(out);
  }

  ParallelEvacuationTask evacuationTask = ParallelEvacuationTask(this, _collection_set);

  workers()->run_task(&evacuationTask);

  if (log_is_enabled(Trace, gc, cset)) {
    ResourceMark rm;
    outputStream* out = Log(gc, cset)::trace_stream();
    out->print_cr("Printing postgc collection set which contains " SIZE_FORMAT " regions:",
                  _collection_set->count());
    _collection_set->print(out);

    out->print_cr("Printing postgc free regions which contain " SIZE_FORMAT " free regions:",
                  _free_regions->count());
    _free_regions->print(out);
  }

  if (log_is_enabled(Trace, gc, region)) {
    ResourceMark rm;
    outputStream* out = Log(gc, region)::trace_stream();
    out->print_cr("all regions after evacuation:");
    print_heap_regions(out);
  }

  _shenandoah_policy->record_phase_end(ShenandoahCollectorPolicy::conc_evac);
}

class VerifyEvacuationClosure: public ExtendedOopClosure {
private:
  ShenandoahHeap*  _heap;
  ShenandoahHeapRegion* _from_region;

public:
  VerifyEvacuationClosure(ShenandoahHeapRegion* from_region) :
    _heap(ShenandoahHeap::heap()), _from_region(from_region) { }

private:
  template <class T>
  inline void do_oop_work(T* p) {
    oop heap_oop = oopDesc::load_decode_heap_oop(p);
    if (! oopDesc::is_null(heap_oop)) {
      guarantee(! _from_region->is_in(heap_oop), "no references to from-region allowed after evacuation: " PTR_FORMAT, p2i((HeapWord*) heap_oop));
    }
  }

public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }

  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

1457 void ShenandoahHeap::roots_iterate(OopClosure* cl) {
1459   assert(SafepointSynchronize::is_at_safepoint(), "Only iterate roots while world is stopped");
1460 
1461   CodeBlobToOopClosure blobsCl(cl, !CodeBlobToOopClosure::FixRelocations);
1462   CLDToOopClosure cldCl(cl);
1463 
1464   ClassLoaderDataGraph::clear_claimed_marks();
1465 
1466   ShenandoahRootProcessor rp(this, 1);
1467   rp.process_all_roots(cl, NULL, &cldCl, &blobsCl, 0);
1468 }
1469 
1470 void ShenandoahHeap::verify_evacuation(ShenandoahHeapRegion* from_region) {
1472   VerifyEvacuationClosure rootsCl(from_region);
1473   roots_iterate(&rootsCl);
1475 }
1476 
1477 bool ShenandoahHeap::supports_tlab_allocation() const {
1478   return true;
1479 }
1480 
1482 size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
1483   size_t idx = _free_regions->current_index();
1484   ShenandoahHeapRegion* current = _free_regions->get(idx);
1485   if (current == NULL) {
1486     return 0;
1487   } else if (current->free() > MinTLABSize) {
1488     // Current region has enough space left, can use it.
1489     return current->free();
1490   } else {
1491     // No more space in current region, we will take next free region
1492     // on the next TLAB allocation.
1493     return ShenandoahHeapRegion::region_size_bytes();
1494   }
1495 }
1496 
1497 size_t ShenandoahHeap::max_tlab_size() const {
1498   return ShenandoahHeapRegion::region_size_bytes();
1499 }
1500 
1501 class ResizeGCLABClosure : public ThreadClosure {
1502 public:
1503   void do_thread(Thread* thread) {
1504     thread->gclab().resize();
1505   }
1506 };
1507 
1508 void ShenandoahHeap::resize_all_tlabs() {
1509   CollectedHeap::resize_all_tlabs();
1510 
1511   ResizeGCLABClosure cl;
1512   Threads::threads_do(&cl);
1513 }
1514 
1515 class AccumulateStatisticsGCLABClosure : public ThreadClosure {
1516 public:
1517   void do_thread(Thread* thread) {
1518     thread->gclab().accumulate_statistics();
1519     thread->gclab().initialize_statistics();
1520   }
1521 };
1522 
1523 void ShenandoahHeap::accumulate_statistics_all_gclabs() {
1524   AccumulateStatisticsGCLABClosure cl;
1525   Threads::threads_do(&cl);
1526 }
1527 
1528 bool  ShenandoahHeap::can_elide_tlab_store_barriers() const {
1529   return true;
1530 }
1531 
1532 oop ShenandoahHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) {
1533   // Overridden to do nothing.
1534   return new_obj;
1535 }
1536 
1537 bool  ShenandoahHeap::can_elide_initializing_store_barrier(oop new_obj) {
1538   return true;
1539 }
1540 
1541 bool ShenandoahHeap::card_mark_must_follow_store() const {
1542   return false;
1543 }
1544 
1545 void ShenandoahHeap::collect(GCCause::Cause cause) {
1546   assert(cause != GCCause::_gc_locker, "no JNI critical callback");
1547   if (GCCause::is_user_requested_gc(cause)) {
1548     if (! DisableExplicitGC) {
1549       _concurrent_gc_thread->do_full_gc(cause);
1550     }
1551   } else if (cause == GCCause::_allocation_failure) {
1552     collector_policy()->set_should_clear_all_soft_refs(true);
1553     _concurrent_gc_thread->do_full_gc(cause);
1554   }
1555 }
1556 
1557 void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {
1558   // No-op. Full GCs are funneled through ShenandoahHeap::collect() instead.
1559 }
1560 
1561 AdaptiveSizePolicy* ShenandoahHeap::size_policy() {
1562   Unimplemented();
1563   return NULL;
1565 }
1566 
1567 CollectorPolicy* ShenandoahHeap::collector_policy() const {
1568   return _shenandoah_policy;
1569 }
1570 
1572 HeapWord* ShenandoahHeap::block_start(const void* addr) const {
1573   Space* sp = heap_region_containing(addr);
1574   if (sp != NULL) {
1575     return sp->block_start(addr);
1576   }
1577   return NULL;
1578 }
1579 
1580 size_t ShenandoahHeap::block_size(const HeapWord* addr) const {
1581   Space* sp = heap_region_containing(addr);
1582   assert(sp != NULL, "block_size of address outside of heap");
1583   return sp->block_size(addr);
1584 }
1585 
1586 bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
1587   Space* sp = heap_region_containing(addr);
1588   return sp->block_is_obj(addr);
1589 }
1590 
1591 jlong ShenandoahHeap::millis_since_last_gc() {
1592   return 0;
1593 }
1594 
1595 void ShenandoahHeap::prepare_for_verify() {
1596   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
1597     ensure_parsability(false);
1598   }
1599 }
1600 
1601 void ShenandoahHeap::print_gc_threads_on(outputStream* st) const {
1602   workers()->print_worker_threads_on(st);
1603 }
1604 
1605 void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
1606   workers()->threads_do(tcl);
1607 }
1608 
1609 void ShenandoahHeap::print_tracing_info() const {
1610   if (log_is_enabled(Info, gc, stats)) {
1611     ResourceMark rm;
1612     outputStream* out = Log(gc, stats)::info_stream();
1613     _shenandoah_policy->print_tracing_info(out);
1614   }
1615 }
1616 
1617 class ShenandoahVerifyRootsClosure: public ExtendedOopClosure {
1618 private:
1619   ShenandoahHeap*  _heap;
1620   VerifyOption     _vo;
1621   bool             _failures;
1622 public:
1623   // _vo == UsePrevMarking -> use "prev" marking information,
1624   // _vo == UseNextMarking -> use "next" marking information,
1625   // _vo == UseMarkWord    -> use mark word from object header.
1626   ShenandoahVerifyRootsClosure(VerifyOption vo) :
1627     _heap(ShenandoahHeap::heap()),
1628     _vo(vo),
1629     _failures(false) { }
1630 
1631   bool failures() { return _failures; }
1632 
1633 private:
1634   template <class T>
1635   inline void do_oop_work(T* p) {
1636     oop obj = oopDesc::load_decode_heap_oop(p);
1637     if (! oopDesc::is_null(obj) && ! obj->is_oop()) {
1638       _failures = true;
1639       // Just for debugging.
1640       tty->print_cr("Root location "PTR_FORMAT" has invalid oop "PTR_FORMAT,
1641                     p2i(p), p2i((void*) obj));
1643     }
1644     guarantee(obj->is_oop_or_null(), "is oop or null");
1645   }
1646 
1647 public:
1648   void do_oop(oop* p)       {
1649     do_oop_work(p);
1650   }
1651 
1652   void do_oop(narrowOop* p) {
1653     do_oop_work(p);
1654   }
1655 
1656 };
1657 
1658 class ShenandoahVerifyHeapClosure: public ObjectClosure {
1659 private:
1660   ShenandoahVerifyRootsClosure& _rootsCl;
1661 public:
1662   ShenandoahVerifyHeapClosure(ShenandoahVerifyRootsClosure& rc) :
1663     _rootsCl(rc) {}
1664 
1665   void do_object(oop p) {
1666     _rootsCl.do_oop(&p);
1667   }
1668 };
1669 
1670 class ShenandoahVerifyKlassClosure: public KlassClosure {
1671   OopClosure *_oop_closure;
1672  public:
1673   ShenandoahVerifyKlassClosure(OopClosure* cl) : _oop_closure(cl) {}
1674   void do_klass(Klass* k) {
1675     k->oops_do(_oop_closure);
1676   }
1677 };
1678 
1679 void ShenandoahHeap::verify(VerifyOption vo) {
1680   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
1681 
1682     ShenandoahVerifyRootsClosure rootsCl(vo);
1683 
1684     assert(Thread::current()->is_VM_thread(),
1685            "Expected to be executed serially by the VM thread at this point");
1686 
1687     roots_iterate(&rootsCl);
1688 
1689     bool failures = rootsCl.failures();
1690     log_trace(gc)("verify failures: %s", BOOL_TO_STR(failures));
1691 
1692     ShenandoahVerifyHeapClosure heapCl(rootsCl);
1693 
1694     object_iterate(&heapCl);
1695     // TODO: Implement rest of it.
1696   } else {
1697     tty->print("(SKIPPING roots, heapRegions, remset) ");
1698   }
1699 }

1700 size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
1701   return _free_regions->capacity();
1702 }
1703 
1704 class ShenandoahIterateObjectClosureRegionClosure: public ShenandoahHeapRegionClosure {
1705   ObjectClosure* _cl;
1706 public:
1707   ShenandoahIterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {}
1708   bool doHeapRegion(ShenandoahHeapRegion* r) {
1709     ShenandoahHeap::heap()->marked_object_iterate(r, _cl);
1710     return false;
1711   }
1712 };
1713 
1714 void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
1715   ShenandoahIterateObjectClosureRegionClosure blk(cl);
1716   heap_region_iterate(&blk, false, true);
1717 }
1718 
1719 class ShenandoahSafeObjectIterateAdjustPtrsClosure : public MetadataAwareOopClosure {
1720 private:
1721   ShenandoahHeap* _heap;
1722 
1723 public:
1724   ShenandoahSafeObjectIterateAdjustPtrsClosure() : _heap(ShenandoahHeap::heap()) {}
1725 
1726 private:
1727   template <class T>
1728   inline void do_oop_work(T* p) {
1729     T o = oopDesc::load_heap_oop(p);
1730     if (!oopDesc::is_null(o)) {
1731       oop obj = oopDesc::decode_heap_oop_not_null(o);
1732       oopDesc::encode_store_heap_oop(p, BrooksPointer::forwardee(obj));
1733     }
1734   }
1735 public:
1736   void do_oop(oop* p) {
1737     do_oop_work(p);
1738   }
1739   void do_oop(narrowOop* p) {
1740     do_oop_work(p);
1741   }
1742 };
1743 
1744 class ShenandoahSafeObjectIterateAndUpdate : public ObjectClosure {
1745 private:
1746   ObjectClosure* _cl;
1747 public:
1748   ShenandoahSafeObjectIterateAndUpdate(ObjectClosure *cl) : _cl(cl) {}
1749 
1750   virtual void do_object(oop obj) {
1751     assert (oopDesc::unsafe_equals(obj, BrooksPointer::forwardee(obj)),
1752             "avoid double-counting: only non-forwarded objects here");
1753 
1754     // Fix up the ptrs.
1755     ShenandoahSafeObjectIterateAdjustPtrsClosure adjust_ptrs;
1756     obj->oop_iterate(&adjust_ptrs);
1757 
1758     // Now hand the object to the wrapped closure:
1759     _cl->do_object(obj);
1760   }
1761 };
1762 
1763 void ShenandoahHeap::safe_object_iterate(ObjectClosure* cl) {
1764   assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1765 
1766   // Safe iteration does objects only with correct references.
1767   // This is why we skip dirty regions that have stale copies of objects,
1768   // and fix up the pointers in the returned objects.
1769 
1770   ShenandoahSafeObjectIterateAndUpdate safe_cl(cl);
1771   ShenandoahIterateObjectClosureRegionClosure blk(&safe_cl);
1772   heap_region_iterate(&blk,
1773                       /* skip_dirty_regions = */ true,
1774                       /* skip_humongous_continuations = */ true);
1775 
1776   _need_update_refs = false; // already updated the references
1777 }
1778 
1779 // Apply blk->doHeapRegion() to all committed regions in address order, terminating
1780 // early if doHeapRegion() returns true; the flags skip cset/humongous-continuation regions.
1781 void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk, bool skip_dirty_regions, bool skip_humongous_continuation) const {
1782   for (size_t i = 0; i < _num_regions; i++) {
1783     ShenandoahHeapRegion* current  = _ordered_regions->get(i);
1784     if (skip_humongous_continuation && current->is_humongous_continuation()) {
1785       continue;
1786     }
1787     if (skip_dirty_regions && in_collection_set(current)) {
1788       continue;
1789     }
1790     if (blk->doHeapRegion(current)) {
1791       return;
1792     }
1793   }
1794 }
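
     // Usage sketch (hypothetical closure, for illustration only):
     //
     //   class CountCommittedRegionsClosure : public ShenandoahHeapRegionClosure {
     //     size_t _count;
     //   public:
     //     CountCommittedRegionsClosure() : _count(0) {}
     //     bool doHeapRegion(ShenandoahHeapRegion* r) {
     //       _count++;
     //       return false; // returning true would terminate the iteration early
     //     }
     //     size_t count() const { return _count; }
     //   };
     //
     //   CountCommittedRegionsClosure blk;
     //   ShenandoahHeap::heap()->heap_region_iterate(&blk);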
1795 
1796 class ClearLivenessClosure : public ShenandoahHeapRegionClosure {
1797   ShenandoahHeap* sh;
1798 public:
1799   ClearLivenessClosure(ShenandoahHeap* heap) : sh(heap) { }
1800 
1801   bool doHeapRegion(ShenandoahHeapRegion* r) {
1802     r->clear_live_data();
1803     sh->set_next_top_at_mark_start(r->bottom(), r->top());
1804     return false;
1805   }
1806 };
1807 
1808 void ShenandoahHeap::start_concurrent_marking() {
1810   shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::accumulate_stats);
1811   accumulate_statistics_all_tlabs();
1812   shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::accumulate_stats);
1813 
1814   set_concurrent_mark_in_progress(true);
1815   // We need to reset all TLABs because we'd lose marks on all objects allocated in them.
1816   if (UseTLAB) {
1817     shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::make_parsable);
1818     ensure_parsability(true);
1819     shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::make_parsable);
1820   }
1821 
1822   _shenandoah_policy->record_bytes_allocated(_bytes_allocated_since_cm);
1823   _used_start_gc = used();
1824 
1825 #ifdef ASSERT
1826   if (ShenandoahDumpHeapBeforeConcurrentMark) {
1827     ensure_parsability(false);
1828     print_all_refs("pre-mark");
1829   }
1830 #endif
1831 
1832   shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::clear_liveness);
1833   ClearLivenessClosure clc(this);
1834   heap_region_iterate(&clc);
1835   shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::clear_liveness);
1836 
1837   if (UseShenandoahMatrix) {
1838     connection_matrix()->clear_all();
1839   }
1843 
1844   // Make above changes visible to worker threads
1845   OrderAccess::fence();
1846 
1847   shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::scan_roots);
1848   concurrentMark()->init_mark_roots();
1849   shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::scan_roots);
1852 }
1853 
1854 class VerifyAfterEvacuationClosure : public ExtendedOopClosure {
1855 
1856   ShenandoahHeap* _sh;
1857 
1858 public:
1859   VerifyAfterEvacuationClosure() : _sh ( ShenandoahHeap::heap() ) {}
1860 
1861   template<class T> void do_oop_nv(T* p) {
1862     T heap_oop = oopDesc::load_heap_oop(p);
1863     if (!oopDesc::is_null(heap_oop)) {
1864       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
1865       guarantee(_sh->in_collection_set(obj) == (! oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj))),
1866                 "forwarded objects can only exist in dirty (from-space) regions; is_dirty: %s, is_forwarded: %s, obj-klass: %s, marked: %s",
1867                 BOOL_TO_STR(_sh->in_collection_set(obj)),
1868                 BOOL_TO_STR(! oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj))),
1869                 obj->klass()->external_name(),
1870                 BOOL_TO_STR(_sh->is_marked_complete(obj))
1871                 );
1872       obj = oopDesc::bs()->read_barrier(obj);
1873       guarantee(! _sh->in_collection_set(obj), "forwarded oops must not point to dirty regions");
1874       guarantee(obj->is_oop(), "is_oop");
1875       guarantee(Metaspace::contains(obj->klass()), "klass pointer must go to metaspace");
1876     }
1877   }
1878 
1879   void do_oop(oop* p)       { do_oop_nv(p); }
1880   void do_oop(narrowOop* p) { do_oop_nv(p); }
1881 
1882 };
1883 
1884 void ShenandoahHeap::verify_heap_after_evacuation() {
1886   verify_heap_size_consistency();
1887 
1888   ensure_parsability(false);
1889 
1890   VerifyAfterEvacuationClosure cl;
1891   roots_iterate(&cl);
1892 
1893   ObjectToOopClosure objs(&cl);
1894   object_iterate(&objs);
1896 }
1897 
1898 class VerifyRegionsAfterUpdateRefsClosure : public ShenandoahHeapRegionClosure {
1899 public:
1900   bool doHeapRegion(ShenandoahHeapRegion* r) {
1901     assert(! ShenandoahHeap::heap()->in_collection_set(r), "no region may be in the collection set after update-refs");
1902     return false;
1903   }
1904 };
1905 
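     // Called when a marking cycle completes normally: the "next" marking state
     // the finished cycle built becomes the authoritative "complete" state, and
     // the old complete state is recycled for the upcoming cycle.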
1906 void ShenandoahHeap::swap_mark_bitmaps() {
1907   // Swap bitmaps.
1908   CMBitMap* tmp1 = _complete_mark_bit_map;
1909   _complete_mark_bit_map = _next_mark_bit_map;
1910   _next_mark_bit_map = tmp1;
1911 
1912   // Swap top-at-mark-start pointers
1913   HeapWord** tmp2 = _complete_top_at_mark_starts;
1914   _complete_top_at_mark_starts = _next_top_at_mark_starts;
1915   _next_top_at_mark_starts = tmp2;
1916 
1917   HeapWord** tmp3 = _complete_top_at_mark_starts_base;
1918   _complete_top_at_mark_starts_base = _next_top_at_mark_starts_base;
1919   _next_top_at_mark_starts_base = tmp3;
1920 }
1921 
1922 class VerifyReachableHeapClosure : public ExtendedOopClosure {
1923 private:
1924   SCMObjToScanQueue* _queue;
1925   ShenandoahHeap* _heap;
1926   CMBitMap* _map;
1927   bool _check_matrix;
1928   oop _obj;
1929 public:
1930   VerifyReachableHeapClosure(SCMObjToScanQueue* queue, CMBitMap* map, bool check_matrix) :
1931           _queue(queue), _heap(ShenandoahHeap::heap()), _map(map), _check_matrix(check_matrix) {};
1932   template <class T>
1933   void do_oop_work(T* p) {
1934     T o = oopDesc::load_heap_oop(p);
1935     if (!oopDesc::is_null(o)) {
1936       oop obj = oopDesc::decode_heap_oop_not_null(o);
1937       guarantee(check_obj_alignment(obj), "sanity");
1938 
1939       guarantee(!oopDesc::is_null(obj), "sanity");
1940       guarantee(_heap->is_in(obj), "sanity");
1941 
1942       oop forw = BrooksPointer::forwardee(obj);
1943       guarantee(!oopDesc::is_null(forw), "sanity");
1944       guarantee(_heap->is_in(forw), "sanity");
1945 
1946       guarantee(oopDesc::unsafe_equals(obj, forw), "should not be forwarded");
1947 
1948       if (_check_matrix) {
1949         size_t from_idx = _heap->heap_region_index_containing(p);
1950         size_t to_idx = _heap->heap_region_index_containing(obj);
1951         if (!_heap->connection_matrix()->is_connected(from_idx, to_idx)) {
1952           tty->print_cr("from-obj: ");
1953           _obj->print_on(tty);
1954           tty->print_cr("to-obj:");
1955           obj->print_on(tty);
1956           tty->print_cr("from-obj allocated after mark: %s", BOOL_TO_STR(_heap->allocated_after_complete_mark_start((HeapWord*) _obj)));
1957           tty->print_cr("to-obj allocated after mark: %s", BOOL_TO_STR(_heap->allocated_after_complete_mark_start((HeapWord*) obj)));
1958           tty->print_cr("from-obj marked: %s", BOOL_TO_STR(_heap->is_marked_complete(_obj)));
1959           tty->print_cr("to-obj marked: %s", BOOL_TO_STR(_heap->is_marked_complete(obj)));
1960           tty->print_cr("from-idx: " SIZE_FORMAT ", to-idx: " SIZE_FORMAT, from_idx, to_idx);
1961 
1962           oop fwd_from = BrooksPointer::forwardee(_obj);
1963           oop fwd_to = BrooksPointer::forwardee(obj);
1964           tty->print_cr("from-obj forwardee: " PTR_FORMAT, p2i(fwd_from));
1965           tty->print_cr("to-obj forwardee: " PTR_FORMAT, p2i(fwd_to));
1966           tty->print_cr("forward(from-obj) marked: %s", BOOL_TO_STR(_heap->is_marked_complete(fwd_from)));
1967           tty->print_cr("forward(to-obj) marked: %s", BOOL_TO_STR(_heap->is_marked_complete(fwd_to)));
1968           size_t fwd_from_idx = _heap->heap_region_index_containing(fwd_from);
1969           size_t fwd_to_idx = _heap->heap_region_index_containing(fwd_to);
1970           tty->print_cr("forward(from-idx): " SIZE_FORMAT ", forward(to-idx): " SIZE_FORMAT, fwd_from_idx, fwd_to_idx);
1971           tty->print_cr("forward(from) connected with forward(to)? %s", BOOL_TO_STR(_heap->connection_matrix()->is_connected(fwd_from_idx, fwd_to_idx)));
1972         }
1973         guarantee(oopDesc::unsafe_equals(ShenandoahBarrierSet::resolve_oop_static_not_null(obj), obj), "must not be forwarded");
1974         guarantee(_heap->connection_matrix()->is_connected(from_idx, to_idx), "must be connected");
1975       }
1976 
1977       if (_map->parMark((HeapWord*) obj)) {
1978         _queue->push(SCMTask(obj));
1979       }
1980     }
1981   }
1982 
1983   void do_oop(oop* p) { do_oop_work(p); }
1984   void do_oop(narrowOop* p) { do_oop_work(p); }
1985   void set_obj(oop o) { _obj = o; }
1986 };
1987 
1988 void ShenandoahHeap::verify_heap_reachable_at_safepoint() {
1989   guarantee(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
1990   guarantee(ShenandoahVerify || (UseShenandoahMatrix && VerifyShenandoahMatrix),
1991             "only when these are enabled, and bitmap is initialized in ShenandoahHeap::initialize");
1992 
1993   OrderAccess::fence();
1994   ensure_parsability(false);
1995 
1996   // Allocate temporary bitmap for storing marking wavefront:
1997   MemRegion mr(_verification_bit_map.startWord(), _verification_bit_map.endWord());
1998   _verification_bit_map.clear_range_large(mr);
1999 
2000   // Initialize a single queue
2001   SCMObjToScanQueue* q = new SCMObjToScanQueue();
2002   q->initialize();
2003 
2004   // Scan root set
2005   ClassLoaderDataGraph::clear_claimed_marks();
2006   ShenandoahRootProcessor rp(this, 1);
2007 
2008   {
2009     VerifyReachableHeapClosure cl(q, &_verification_bit_map, false);
2010     CLDToOopClosure cld_cl(&cl);
2011     CodeBlobToOopClosure code_cl(&cl, ! CodeBlobToOopClosure::FixRelocations);
2012     rp.process_all_roots(&cl, &cl, &cld_cl, &code_cl, 0);
2013   }
2014 
2015   // Finish the scan
2016   {
2017     VerifyReachableHeapClosure cl(q, &_verification_bit_map, UseShenandoahMatrix && VerifyShenandoahMatrix);
2018     SCMTask task;
2019     while ((q->pop_buffer(task) ||
2020             q->pop_local(task) ||
2021             q->pop_overflow(task))) {
2022       oop obj = task.obj();
2023       assert(!oopDesc::is_null(obj), "must not be null");
2024       cl.set_obj(obj);
2025       obj->oop_iterate(&cl);
2026     }
2027   }
2028 
2029   // Clean up.
2030   delete q;
2031 }
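
     // The verification above is a single-threaded transitive closure over the
     // reachable heap: root scanning seeds one queue, and parMark() on the
     // temporary bitmap guarantees each object is pushed and scanned at most once.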
2032 
2033 void ShenandoahHeap::stop_concurrent_marking() {
2034   assert(concurrent_mark_in_progress(), "How else could we get here?");
2035   if (! cancelled_concgc()) {
2036     // Had marking been cancelled, references might still need updating, so
2037     // only clear the flag and swap the mark bitmaps after a normal completion.
2038     set_need_update_refs(false);
2039     swap_mark_bitmaps();
2040   }
2041   set_concurrent_mark_in_progress(false);
2042 
2043   if (log_is_enabled(Trace, gc, region)) {
2044     ResourceMark rm;
2045     outputStream* out = Log(gc, region)::trace_stream();
2046     print_heap_regions(out);
2047   }
2049 }
2050 
2051 void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
2052   _concurrent_mark_in_progress = in_progress ? 1 : 0;
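     // Flip the SATB barrier for all Java threads; the second argument is the
     // previous state that the queue set sanity-checks (the queues are active
     // exactly while marking is in progress).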
2053   JavaThread::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
2054 }
2055 
2056 void ShenandoahHeap::set_evacuation_in_progress_concurrently(bool in_progress) {
2057   // Note: it is important to first release the _evacuation_in_progress flag here,
2058   // so that Java threads can get out of oom_during_evacuation() and reach a safepoint,
2059   // in case a VM task is pending.
2060   set_evacuation_in_progress(in_progress);
2061   MutexLocker mu(Threads_lock);
2062   JavaThread::set_evacuation_in_progress_all_threads(in_progress);
2063 }
2064 
2065 void ShenandoahHeap::set_evacuation_in_progress_at_safepoint(bool in_progress) {
2066   assert(SafepointSynchronize::is_at_safepoint(), "Only call this at safepoint");
2067   set_evacuation_in_progress(in_progress);
2068   JavaThread::set_evacuation_in_progress_all_threads(in_progress);
2069 }
2070 
2071 void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
2072   _evacuation_in_progress = in_progress ? 1 : 0;
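     // Publish the new flag value before any thread acts on it.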
2073   OrderAccess::fence();
2074 }
2075 
2076 void ShenandoahHeap::verify_copy(oop p, oop c) {
2077   assert(! oopDesc::unsafe_equals(p, oopDesc::bs()->read_barrier(p)), "forwarded correctly");
2078   assert(oopDesc::unsafe_equals(oopDesc::bs()->read_barrier(p), c), "verify pointer is correct");
2079   if (p->klass() != c->klass()) {
2080     print_heap_regions();
2081   }
2082   assert(p->klass() == c->klass(), "verify class p-size: "INT32_FORMAT" c-size: "INT32_FORMAT, p->size(), c->size());
2083   assert(p->size() == c->size(), "verify size");
2084   // Object may have been locked between copy and verification:
2085   //   assert(p->mark() == c->mark(), "verify mark");
2086   assert(oopDesc::unsafe_equals(c, oopDesc::bs()->read_barrier(c)), "verify only forwarded once");
2087 }
2088 
2089 void ShenandoahHeap::oom_during_evacuation() {
2090   log_develop_trace(gc)("Out of memory during evacuation, cancel evacuation, schedule full GC by thread %d",
2091                         Thread::current()->osthread()->thread_id());
2092 
2093   // We ran out of memory during evacuation. Cancel evacuation, and schedule a full-GC.
2094   collector_policy()->set_should_clear_all_soft_refs(true);
2095   concurrent_thread()->try_set_full_gc();
2096   cancel_concgc(_oom_evacuation);
2097 
2098   if ((! Thread::current()->is_GC_task_thread()) && (! Thread::current()->is_ConcurrentGC_thread())) {
2099     assert(! Threads_lock->owned_by_self()
2100            || SafepointSynchronize::is_at_safepoint(), "must not hold Threads_lock here");
2101     log_warning(gc)("OOM during evacuation. Let Java thread wait until evacuation finishes.");
2102     while (_evacuation_in_progress) { // wait.
2103       Thread::current()->_ParkEvent->park(1);
2104     }
2105   }
2107 }
2108 
2109 HeapWord* ShenandoahHeap::tlab_post_allocation_setup(HeapWord* obj) {
2110   // Reserve one word for the Brooks pointer and initialize it for the object that follows.
2111   HeapWord* result = obj + BrooksPointer::word_size();
2112   BrooksPointer::initialize(oop(result));
2113   return result;
2114 }
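
     // For illustration, a sketch of the resulting layout (assuming the usual
     // one-word Brooks pointer, i.e. BrooksPointer::word_size() == 1):
     //
     //   obj:        [brooks forwarding pointer]   <- word reserved here
     //   obj + 1:    [mark word][klass][fields...] <- "result", returned to the
     //                                                caller; the forwarding
     //                                                pointer initially points
     //                                                back at this address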
2115 
2116 uint ShenandoahHeap::oop_extra_words() {
2117   return BrooksPointer::word_size();
2118 }
2119 
2120 void ShenandoahHeap::grow_heap_by(size_t num_regions) {
2121   size_t base = _num_regions;
2122   ensure_new_regions(num_regions);
2123   for (size_t i = 0; i < num_regions; i++) {
2124     size_t new_region_index = i + base;
2125     HeapWord* start = _first_region_bottom + (ShenandoahHeapRegion::region_size_bytes() / HeapWordSize) * new_region_index;
2126     ShenandoahHeapRegion* new_region = new ShenandoahHeapRegion(this, start, ShenandoahHeapRegion::region_size_bytes() / HeapWordSize, new_region_index);
2127 
2128     if (log_is_enabled(Trace, gc, region)) {
2129       ResourceMark rm;
2130       outputStream* out = Log(gc, region)::trace_stream();
2131       out->print_cr("allocating new region at index: "SIZE_FORMAT, new_region_index);
2132       new_region->print_on(out);
2133     }
2134 
2135     assert(_ordered_regions->active_regions() == new_region->region_number(), "must match");
2136     _ordered_regions->add_region(new_region);
2137     _in_cset_fast_test_base[new_region_index] = false; // Not in cset
2138     _next_top_at_mark_starts_base[new_region_index] = new_region->bottom();
2139     _complete_top_at_mark_starts_base[new_region_index] = new_region->bottom();
2140 
2141     _free_regions->add_region(new_region);
2142   }
2143 }
2144 
2145 void ShenandoahHeap::ensure_new_regions(size_t new_regions) {
2147   size_t num_regions = _num_regions;
2148   size_t new_num_regions = num_regions + new_regions;
2149   assert(new_num_regions <= _max_regions, "we checked this earlier");
2150 
2151   size_t expand_size = new_regions * ShenandoahHeapRegion::region_size_bytes();
2152   log_trace(gc, region)("expanding storage by "SIZE_FORMAT_HEX" bytes, for "SIZE_FORMAT" new regions", expand_size, new_regions);
2153   bool success = _storage.expand_by(expand_size, ShenandoahAlwaysPreTouch);
2154   assert(success, "should always be able to expand by requested size");
2155 
2156   _num_regions = new_num_regions;
2158 }
2159 
2160 ShenandoahForwardedIsAliveClosure::ShenandoahForwardedIsAliveClosure() :
2161   _heap(ShenandoahHeap::heap_no_check()) {
2162 }
2163 
2164 void ShenandoahForwardedIsAliveClosure::init(ShenandoahHeap* heap) {
2165   _heap = heap;
2166 }
2167 
2168 bool ShenandoahForwardedIsAliveClosure::do_object_b(oop obj) {
2170   assert(_heap != NULL, "sanity");
2171   obj = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
2172 #ifdef ASSERT
2173   if (_heap->concurrent_mark_in_progress()) {
2174     assert(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)), "only query to-space");
2175   }
2176 #endif
2177   assert(!oopDesc::is_null(obj), "null");
2178   return _heap->is_marked_next(obj);
2179 }
2180 
2181 void ShenandoahHeap::ref_processing_init() {
2182   MemRegion mr = reserved_region();
2183 
2184   isAlive.init(ShenandoahHeap::heap());
2185   assert(_max_workers > 0, "Sanity");
2186 
2187   _ref_processor =
2188     new ReferenceProcessor(mr,                       // span
2189                            ParallelRefProcEnabled,   // MT processing
2191                            _max_workers,             // degree of MT processing
2193                            true,                     // MT discovery
2195                            _max_workers,             // degree of MT discovery
2197                            false,                    // reference discovery is not atomic
2199                            &isAlive);
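     // Discovery is registered as non-atomic (the "false" argument above),
     // presumably because reference discovery here runs concurrently with
     // mutators rather than atomically at a safepoint.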
2200 }
2201 
2202 #ifdef ASSERT
2203 void ShenandoahHeap::set_from_region_protection(bool protect) {
2204   for (size_t i = 0; i < _num_regions; i++) {
2205     ShenandoahHeapRegion* region = _ordered_regions->get(i);
2206     if (region != NULL && in_collection_set(region)) {
2207       if (protect) {
2208         region->memProtectionOn();
2209       } else {
2210         region->memProtectionOff();
2211       }
2212     }
2213   }
2214 }
2215 #endif
2216 
2217 size_t ShenandoahHeap::num_regions() {
2218   return _num_regions;
2219 }
2220 
2221 size_t ShenandoahHeap::max_regions() {
2222   return _max_regions;
2223 }
2224 
2225 GCTracer* ShenandoahHeap::tracer() {
2226   return shenandoahPolicy()->tracer();
2227 }
2228 
2229 size_t ShenandoahHeap::tlab_used(Thread* thread) const {
2230   return _free_regions->used();
2231 }
2232 
2233 void ShenandoahHeap::cancel_concgc(GCCause::Cause cause) {
2234   if (try_cancel_concgc()) {
2235     log_info(gc)("Cancelling concurrent GC: %s", GCCause::to_string(cause));
2236     _shenandoah_policy->report_concgc_cancelled();
2237   }
2238 }
2239 
2240 void ShenandoahHeap::cancel_concgc(ShenandoahCancelCause cause) {
2241   if (try_cancel_concgc()) {
2242     log_info(gc)("Cancelling concurrent GC: %s", cancel_cause_to_string(cause));
2243     _shenandoah_policy->report_concgc_cancelled();
2244   }
2245 }
2246 
2247 const char* ShenandoahHeap::cancel_cause_to_string(ShenandoahCancelCause cause) {
2248   switch (cause) {
2249     case _oom_evacuation:
2250       return "Out of memory for evacuation";
2251     case _vm_stop:
2252       return "Stopping VM";
2253     default:
2254       return "Unknown";
2255   }
2256 }
2257 
2258 uint ShenandoahHeap::max_workers() {
2259   return _max_workers;
2260 }
2261 
2262 void ShenandoahHeap::stop() {
2263   // The shutdown sequence should be able to terminate even while a GC cycle is running.
2264 
2265   // Step 1. Notify control thread that we are in shutdown.
2266   // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown.
2267   // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
2268   _concurrent_gc_thread->prepare_for_graceful_shutdown();
2269 
2270   // Step 2. Notify GC workers that we are cancelling GC.
2271   cancel_concgc(_vm_stop);
2272 
2273   // Step 3. Wait until GC worker exits normally.
2274   _concurrent_gc_thread->stop();
2275 }
2276 
2277 void ShenandoahHeap::unlink_string_and_symbol_table(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols) {
2279   StringSymbolTableUnlinkTask shenandoah_unlink_task(is_alive, process_strings, process_symbols);
2280   workers()->run_task(&shenandoah_unlink_task);
2285 }
2286 
2287 void ShenandoahHeap::set_need_update_refs(bool need_update_refs) {
2288   _need_update_refs = need_update_refs;
2289 }
2290 
2291 // FIXME: this should live in the heap region set.
2292 ShenandoahHeapRegion* ShenandoahHeap::next_compaction_region(const ShenandoahHeapRegion* r) {
2293   size_t region_idx = r->region_number() + 1;
2294   ShenandoahHeapRegion* next = _ordered_regions->get(region_idx);
2295   guarantee(next->region_number() == region_idx, "region number must match");
2296   while (next->is_humongous()) {
2297     region_idx = next->region_number() + 1;
2298     next = _ordered_regions->get(region_idx);
2299     guarantee(next->region_number() == region_idx, "region number must match");
2300   }
2301   return next;
2302 }
2303 
2304 void ShenandoahHeap::set_region_in_collection_set(size_t region_index, bool b) {
2305   _in_cset_fast_test_base[region_index] = b;
2306 }
2307 
2308 ShenandoahMonitoringSupport* ShenandoahHeap::monitoring_support() {
2309   return _monitoring_support;
2310 }
2311 
2312 CMBitMap* ShenandoahHeap::complete_mark_bit_map() {
2313   return _complete_mark_bit_map;
2314 }
2315 
2316 CMBitMap* ShenandoahHeap::next_mark_bit_map() {
2317   return _next_mark_bit_map;
2318 }
2319 
2320 void ShenandoahHeap::add_free_region(ShenandoahHeapRegion* r) {
2321   _free_regions->add_region(r);
2322 }
2323 
2324 void ShenandoahHeap::clear_free_regions() {
2325   _free_regions->clear();
2326 }
2327 
2328 address ShenandoahHeap::in_cset_fast_test_addr() {
2329   return (address) (ShenandoahHeap::heap()->_in_cset_fast_test);
2330 }
2331 
2332 address ShenandoahHeap::cancelled_concgc_addr() {
2333   return (address) &(ShenandoahHeap::heap()->_cancelled_concgc);
2334 }
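
     // These raw flag addresses appear to exist so that JIT-compiled barrier
     // code can test the in-cset and cancelled-GC flags directly, without a
     // call into the runtime.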
2335 
2336 void ShenandoahHeap::clear_cset_fast_test() {
2337   assert(_in_cset_fast_test_base != NULL, "sanity");
2338   memset(_in_cset_fast_test_base, false,
2339          _in_cset_fast_test_length * sizeof(bool));
2340 }
2341 
2342 size_t ShenandoahHeap::conservative_max_heap_alignment() {
2343   return ShenandoahMaxRegionSize;
2344 }
2345 
2346 size_t ShenandoahHeap::bytes_allocated_since_cm() {
2347   return _bytes_allocated_since_cm;
2348 }
2349 
2350 void ShenandoahHeap::set_bytes_allocated_since_cm(size_t bytes) {
2351   _bytes_allocated_since_cm = bytes;
2352 }
2353 
2354 size_t ShenandoahHeap::max_allocated_gc() {
2355   return _max_allocated_gc;
2356 }
2357 
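     // The top-at-mark-start accessors below index by shifting the raw region
     // base address, so _next/_complete_top_at_mark_starts are assumed to be
     // biased views of the corresponding *_base arrays that grow_heap_by()
     // fills in per region index.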
2358 void ShenandoahHeap::set_next_top_at_mark_start(HeapWord* region_base, HeapWord* addr) {
2359   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_shift();
2360   _next_top_at_mark_starts[index] = addr;
2361 }
2362 
2363 HeapWord* ShenandoahHeap::next_top_at_mark_start(HeapWord* region_base) {
2364   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_shift();
2365   return _next_top_at_mark_starts[index];
2366 }
2367 
2368 void ShenandoahHeap::set_complete_top_at_mark_start(HeapWord* region_base, HeapWord* addr) {
2369   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_shift();
2370   _complete_top_at_mark_starts[index] = addr;
2371 }
2372 
2373 HeapWord* ShenandoahHeap::complete_top_at_mark_start(HeapWord* region_base) {
2374   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_shift();
2375   return _complete_top_at_mark_starts[index];
2376 }
2377 
2378 void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
2379   _full_gc_in_progress = in_progress;
2380 }
2381 
2382 bool ShenandoahHeap::is_full_gc_in_progress() const {
2383   return _full_gc_in_progress;
2384 }
2385 
2386 void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
2387   _update_refs_in_progress = in_progress;
2388 }
2389 
2390 bool ShenandoahHeap::is_update_refs_in_progress() const {
2391   return _update_refs_in_progress;
2392 }
2393 
2394 class NMethodOopInitializer : public OopClosure {
2395 private:
2396   ShenandoahHeap* _heap;
2397 public:
2398   NMethodOopInitializer() : _heap(ShenandoahHeap::heap()) {
2399   }
2400 
2401 private:
2402   template <class T>
2403   inline void do_oop_work(T* p) {
2404     T o = oopDesc::load_heap_oop(p);
2405     if (! oopDesc::is_null(o)) {
2406       oop obj1 = oopDesc::decode_heap_oop_not_null(o);
2407       oop obj2 = oopDesc::bs()->write_barrier(obj1);
2408       if (! oopDesc::unsafe_equals(obj1, obj2)) {
2409         oopDesc::encode_store_heap_oop(p, obj2);
2410       }
2411     }
2412   }
2413 
2414 public:
2415   void do_oop(oop* o) {
2416     do_oop_work(o);
2417   }
2418   void do_oop(narrowOop* o) {
2419     do_oop_work(o);
2420   }
2421 };
2422 
2423 void ShenandoahHeap::register_nmethod(nmethod* nm) {
2424   NMethodOopInitializer init;
2425   nm->oops_do(&init);
2426   nm->fix_oop_relocations();
2427 }
2428 
2429 void ShenandoahHeap::unregister_nmethod(nmethod* nm) {
2430 }
2431 
2432 void ShenandoahHeap::pin_object(oop o) {
2433   heap_region_containing(o)->pin();
2434 }
2435 
2436 void ShenandoahHeap::unpin_object(oop o) {
2437   heap_region_containing(o)->unpin();
2438 }
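
     // Pinned regions are excluded from collection set selection (see the
     // garbage-counting closure below), keeping a pinned object's address
     // stable; this appears to back JNI critical regions, matching the
     // "no JNI critical callback" assert in collect() above.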
2439 
2441 GCTimer* ShenandoahHeap::gc_timer() const {
2442   return _gc_timer;
2443 }
2444 
2445 class RecordAllRefsOopClosure: public ExtendedOopClosure {
2446 private:
2447   size_t _x;
2448   int *_matrix;
2449   size_t _num_regions;
2450   oop _p;
2451 
2452 public:
2453   RecordAllRefsOopClosure(int* matrix, size_t x, size_t num_regions, oop p) :
2454     _x(x), _matrix(matrix), _num_regions(num_regions), _p(p) {}
2455 
2456   template <class T>
2457   void do_oop_work(T* p) {
2458     oop o = oopDesc::load_decode_heap_oop(p);
2459     if (o != NULL) {
2460       if (ShenandoahHeap::heap()->is_in(o) && o->is_oop()) {
2461         size_t y = ShenandoahHeap::heap()->heap_region_containing(o)->region_number();
2462         _matrix[_x * _num_regions + y]++;
2463       }
2464     }
2465   }
2466   void do_oop(oop* p) {
2467     do_oop_work(p);
2468   }
2469 
2470   void do_oop(narrowOop* p) {
2471     do_oop_work(p);
2472   }
2473 
2474 };
2475 
2476 class RecordAllRefsObjectClosure : public ObjectClosure {
2477   int *_matrix;
2478   size_t _num_regions;
2479 
2480 public:
2481   RecordAllRefsObjectClosure(int *matrix, size_t num_regions) :
2482     _matrix(matrix), _num_regions(num_regions) {}
2483 
2484   void do_object(oop p) {
2485     if (ShenandoahHeap::heap()->is_in(p) && ShenandoahHeap::heap()->is_marked_next(p) && p->is_oop()) {
2486       size_t x = ShenandoahHeap::heap()->heap_region_containing(p)->region_number();
2487       RecordAllRefsOopClosure cl(_matrix, x, _num_regions, p);
2488       p->oop_iterate(&cl);
2489     }
2490   }
2491 };
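
     // The connection "matrix" below is a flat num_regions() x num_regions()
     // array: connections[x * num + y] counts references from region x into
     // region y (rows are source regions, columns are target regions).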
2492 void ShenandoahHeap::calculate_matrix(int* connections) {
2493   log_develop_trace(gc)("calculating matrix");
2494   ensure_parsability(false);
2495   size_t num = num_regions();
2496 
2497   for (size_t i = 0; i < num; i++) {
2498     for (size_t j = 0; j < num; j++) {
2499       connections[i * num + j] = 0;
2500     }
2501   }
2502 
2503   RecordAllRefsOopClosure cl(connections, 0, num, NULL);
2504   roots_iterate(&cl);
2505 
2506   RecordAllRefsObjectClosure cl2(connections, num);
2507   object_iterate(&cl2);
2509 }
2510 
2511 void ShenandoahHeap::print_matrix(int* connections) {
2512   size_t num = num_regions();
2513   int cs_regions = 0;
2514   int referenced = 0;
2515 
2516   for (size_t i = 0; i < num; i++) {
2517     size_t liveData = ShenandoahHeap::heap()->regions()->get(i)->get_live_data_bytes();
2518 
2519     int numReferencedRegions = 0;
2520     int numReferencedByRegions = 0;
2521 
2522     for (size_t j = 0; j < num; j++) {
2523       if (connections[i * num + j] > 0)
2524         numReferencedRegions++;
2525 
2526       if (connections[j * num + i] > 0)
2527         numReferencedByRegions++;
2528     }
2529 
2530     cs_regions++;
2531     referenced += numReferencedByRegions;
2532 
2533     if (ShenandoahHeap::heap()->regions()->get(i)->has_live()) {
2534       tty->print("Region " SIZE_FORMAT " is referenced by %d regions {", i, numReferencedByRegions);
2535       int col_count = 0;
2536       for (size_t j = 0; j < num; j++) {
2537         int count = connections[j * num + i];
2538         if (count > 0) {
2539           col_count++;
2540           if ((col_count % 10) == 0)
2541             tty->print("\n");
2542           tty->print(SIZE_FORMAT "(%d), ", j, count);
2543         }
2544       }
2545       tty->print("} \n");
2546     }
2547   }
2548 
2549   double avg = (double) referenced / (double) cs_regions;
2550   tty->print("Average number of regions scanned / region = %f\n", avg);
2551 }
2552 
2553 class ShenandoahCountGarbageClosure : public ShenandoahHeapRegionClosure {
2554 private:
2555   size_t _garbage;
2556 public:
2557   ShenandoahCountGarbageClosure() : _garbage(0) {
2558   }
2559 
2560   bool doHeapRegion(ShenandoahHeapRegion* r) {
2561     if (! r->is_humongous() && ! r->is_pinned() && ! r->in_collection_set()) {
2562       _garbage += r->garbage();
2563     }
2564     return false;
2565   }
2566 
2567   size_t garbage() {
2568     return _garbage;
2569   }
2570 };
2571 
2572 size_t ShenandoahHeap::garbage() {
2573   ShenandoahCountGarbageClosure cl;
2574   heap_region_iterate(&cl);
2575   return cl.garbage();
2576 }
2577 
2578 ShenandoahConnectionMatrix* ShenandoahHeap::connection_matrix() {
2579   return _connection_matrix;
2580 }
2581 
2582 ShenandoahPartialGC* ShenandoahHeap::partial_gc() {
2583   return _partial_gc;
2584 }
2585 
2586 void ShenandoahHeap::do_partial_collection() {
2587   {
2588     ShenandoahHeapLock lock(this);
2589     partial_gc()->prepare();
2590   }
2591   partial_gc()->do_partial_collection();
2592 }
2593 
2594 class ShenandoahUpdateHeapRefsTask : public AbstractGangTask {
2595 private:
2596   ShenandoahHeap* _heap;
2597   ShenandoahHeapRegionSet* _regions;
2598 
2599 public:
2600   ShenandoahUpdateHeapRefsTask() :
2601     AbstractGangTask("Concurrent Update References Task"),
2602     _heap(ShenandoahHeap::heap()),
2603     _regions(ShenandoahHeap::heap()->regions()) {
2604     _regions->clear_current_index();
2605   }
2606 
2607   void work(uint worker_id) {
2608     ShenandoahUpdateHeapRefsClosure cl;
2609     ShenandoahHeapRegion* r = _regions->claim_next();
2610     while (r != NULL && ! _heap->cancelled_concgc()) {
2611       if (! _heap->in_collection_set(r) &&
2612           ! r->is_empty()) {
2613         HeapWord* limit = r->concurrent_iteration_safe_limit();
2614         _heap->marked_object_oop_iterate(r, &cl, limit);
2615       } else if (_heap->in_collection_set(r)) {
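             // Collection set regions are not scanned: their objects were
             // evacuated. Instead, clear their slice of the complete mark
             // bitmap up to the old top-at-mark-start, presumably so stale
             // marks do not survive until the region is recycled.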
2616         HeapWord* bottom = r->bottom();
2617         HeapWord* top = _heap->complete_top_at_mark_start(r->bottom());
2618         if (top > bottom) {
2619           _heap->complete_mark_bit_map()->clear_range_large(MemRegion(bottom, top));
2620         }
2621       }
2622       r = _regions->claim_next();
2623     }
2624   }
2625 };
2626 
2627 void ShenandoahHeap::concurrent_update_heap_references() {
2628   _shenandoah_policy->record_phase_start(ShenandoahCollectorPolicy::conc_update_refs);
2629   ShenandoahUpdateHeapRefsTask task;
2630   workers()->run_task(&task);
2631   _shenandoah_policy->record_phase_end(ShenandoahCollectorPolicy::conc_update_refs);
2632 }
2633 
2634 void ShenandoahHeap::prepare_update_refs() {
2635   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
2636   set_evacuation_in_progress_at_safepoint(false);
2637   set_update_refs_in_progress(true);
2638   ensure_parsability(true);
2639   connection_matrix()->clear_all();
2640   for (size_t i = 0; i < _num_regions; i++) {
2641     ShenandoahHeapRegion* r = _ordered_regions->get(i);
2642     r->set_concurrent_iteration_safe_limit(r->top());
2643   }
2644 }
2645 
2646 void ShenandoahHeap::finish_update_refs() {
2647   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
2648 
2649   if (! cancelled_concgc()) {
2650     concurrentMark()->update_roots();
2651     recycle_dirty_regions();
2652     set_need_update_refs(false);
2653 
2654     if (ShenandoahVerify) {
2655       verify_update_refs();
2656     }
2657 
2658   }
2659   set_update_refs_in_progress(false);
2660 }
2661 
2662 class ShenandoahVerifyUpdateRefsClosure : public ExtendedOopClosure {
2663 private:
2664   template <class T>
2665   void do_oop_work(T* p) {
2666     T o = oopDesc::load_heap_oop(p);
2667     if (! oopDesc::is_null(o)) {
2668       oop obj = oopDesc::decode_heap_oop_not_null(o);
2669       guarantee(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)),
2670                 "must not be forwarded");
2671     }
2672   }
2673 public:
2674   void do_oop(oop* p) { do_oop_work(p); }
2675   void do_oop(narrowOop* p) { do_oop_work(p); }
2676 };
2677 
2678 void ShenandoahHeap::verify_update_refs() {
2680   ensure_parsability(false);
2681 
2682   ShenandoahVerifyUpdateRefsClosure cl;
2683 
2684   // Verify roots.
2685   {
2686     CodeBlobToOopClosure blobsCl(&cl, !CodeBlobToOopClosure::FixRelocations);
2687     CLDToOopClosure cldCl(&cl);
2688     ClassLoaderDataGraph::clear_claimed_marks();
2689     ShenandoahRootProcessor rp(this, 1);
2690     rp.process_all_roots(&cl, &cl, &cldCl, &blobsCl, 0);
2691   }
2692 
2693   // Verify heap.
2694   for (size_t i = 0; i < num_regions(); i++) {
2695     ShenandoahHeapRegion* r = regions()->get(i);
2696     marked_object_oop_iterate(r, &cl);
2697   }
2698 }
2699 
2700 #ifdef ASSERT
2701 void ShenandoahHeap::assert_heaplock_owned_by_current_thread() {
2702   assert(_heap_lock == locked, "must be locked");
2703   assert(_heap_lock_owner == Thread::current(), "must be owned by current thread");
2704 }
2705 
2706 void ShenandoahHeap::assert_heaplock_or_safepoint() {
2707   Thread* thr = Thread::current();
2708   assert((_heap_lock == locked && _heap_lock_owner == thr) ||
2709          (SafepointSynchronize::is_at_safepoint() && thr->is_VM_thread()),
2710          "must hold the heap lock, or be the VM thread at a safepoint");
2711 }
2712 
2713 #endif