/*
 * Copyright (c) 2013, 2015, Red Hat, Inc. and/or its affiliates.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "memory/allocation.hpp"

#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/parallelCleaning.hpp"

#include "gc/shenandoah/brooksPointer.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc/shenandoah/shenandoahConcurrentThread.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahHumongous.hpp"
#include "gc/shenandoah/shenandoahMarkCompact.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.hpp"
#include "gc/shenandoah/vm_operations_shenandoah.hpp"

#include "runtime/vmThread.hpp"
#include "services/mallocTracker.hpp"
SCMUpdateRefsClosure::SCMUpdateRefsClosure() : _heap(ShenandoahHeap::heap()) {}

#ifdef ASSERT
template <class T>
void AssertToSpaceClosure::do_oop_nv(T* p) {
  T o = oopDesc::load_heap_oop(p);
  if (! oopDesc::is_null(o)) {
    oop obj = oopDesc::decode_heap_oop_not_null(o);
    assert(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)),
           "need to-space object here obj: "PTR_FORMAT" , rb(obj): "PTR_FORMAT", p: "PTR_FORMAT,
           p2i(obj), p2i(ShenandoahBarrierSet::resolve_oop_static_not_null(obj)), p2i(p));
  }
}

void AssertToSpaceClosure::do_oop(narrowOop* p) { do_oop_nv(p); }
void AssertToSpaceClosure::do_oop(oop* p)       { do_oop_nv(p); }
#endif

const char* ShenandoahHeap::name() const {
  return "Shenandoah";
}

void ShenandoahHeap::print_heap_locations(HeapWord* start, HeapWord* end) {
  for (HeapWord* cur = start; cur < end; cur++) {
    tty->print_cr(PTR_FORMAT" : "PTR_FORMAT, p2i(cur), p2i(*((HeapWord**) cur)));
  }
}

class PrintHeapRegionsClosure : public ShenandoahHeapRegionClosure {
private:
  outputStream* _st;
public:
  PrintHeapRegionsClosure() : _st(tty) {}
  PrintHeapRegionsClosure(outputStream* st) : _st(st) {}

  bool doHeapRegion(ShenandoahHeapRegion* r) {
    r->print_on(_st);
    return false;
  }
};

class ShenandoahPretouchTask : public AbstractGangTask {
private:
  ShenandoahHeapRegionSet* _regions;
  const size_t _bitmap_size;
  const size_t _page_size;
  char* _bitmap0_base;
  char* _bitmap1_base;
public:
  ShenandoahPretouchTask(ShenandoahHeapRegionSet* regions,
                         char* bitmap0_base, char* bitmap1_base, size_t bitmap_size,
                         size_t page_size) :
    AbstractGangTask("Shenandoah PreTouch",
                     Universe::is_fully_initialized() ? GCId::current_raw() :
                                                        // During VM initialization there is
                                                        // no GC cycle that this task can be
                                                        // associated with.
                                                        GCId::undefined()),
    _regions(regions),
    _bitmap_size(bitmap_size),
    _page_size(page_size),
    _bitmap0_base(bitmap0_base),
    _bitmap1_base(bitmap1_base) {
    _regions->clear_current_index();
  }

  virtual void work(uint worker_id) {
    ShenandoahHeapRegion* r = _regions->claim_next();
    while (r != NULL) {
      log_trace(gc, heap)("Pretouch region " SIZE_FORMAT ": " PTR_FORMAT " -> " PTR_FORMAT,
                          r->region_number(), p2i(r->bottom()), p2i(r->end()));
      os::pretouch_memory(r->bottom(), r->end(), _page_size);

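      // Each region maps to a contiguous slice of the mark bitmap:
      // heap_map_factor() gives the number of heap bytes covered by one
      // bitmap byte, so dividing region byte-offsets by it yields the
      // byte range of the bitmap that shadows this region.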
      size_t start = r->region_number()       * ShenandoahHeapRegion::RegionSizeBytes / CMBitMap::heap_map_factor();
      size_t end   = (r->region_number() + 1) * ShenandoahHeapRegion::RegionSizeBytes / CMBitMap::heap_map_factor();
      assert (end <= _bitmap_size, "end is sane: " SIZE_FORMAT " <= " SIZE_FORMAT, end, _bitmap_size);

      log_trace(gc, heap)("Pretouch bitmap under region " SIZE_FORMAT ": " PTR_FORMAT " -> " PTR_FORMAT,
                          r->region_number(), p2i(_bitmap0_base + start), p2i(_bitmap0_base + end));
      os::pretouch_memory(_bitmap0_base + start, _bitmap0_base + end, _page_size);

      log_trace(gc, heap)("Pretouch bitmap under region " SIZE_FORMAT ": " PTR_FORMAT " -> " PTR_FORMAT,
                          r->region_number(), p2i(_bitmap1_base + start), p2i(_bitmap1_base + end));
      os::pretouch_memory(_bitmap1_base + start, _bitmap1_base + end, _page_size);

      r = _regions->claim_next();
    }
  }
};

jint ShenandoahHeap::initialize() {
  CollectedHeap::pre_initialize();

  size_t init_byte_size = collector_policy()->initial_heap_byte_size();
  size_t max_byte_size = collector_policy()->max_heap_byte_size();

  Universe::check_alignment(max_byte_size,
                            ShenandoahHeapRegion::RegionSizeBytes,
                            "shenandoah heap");
  Universe::check_alignment(init_byte_size,
                            ShenandoahHeapRegion::RegionSizeBytes,
                            "shenandoah heap");

  ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
                                                 Arguments::conservative_max_heap_alignment());
  initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*) (heap_rs.base() + heap_rs.size()));

  set_barrier_set(new ShenandoahBarrierSet(this));
  ReservedSpace pgc_rs = heap_rs.first_part(max_byte_size);
  _storage.initialize(pgc_rs, init_byte_size);

  _num_regions = init_byte_size / ShenandoahHeapRegion::RegionSizeBytes;
  _max_regions = max_byte_size / ShenandoahHeapRegion::RegionSizeBytes;
  _initialSize = _num_regions * ShenandoahHeapRegion::RegionSizeBytes;
  size_t regionSizeWords = ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize;
  assert(init_byte_size == _initialSize, "tautology");
  _ordered_regions = new ShenandoahHeapRegionSet(_max_regions);
  _collection_set = new ShenandoahCollectionSet(_max_regions);
  _free_regions = new ShenandoahFreeSet(_max_regions);

  // Initialize fast collection set test structure.
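  // The _in_cset_fast_test and top-at-mark-start tables below are "biased"
  // arrays: the stored pointer is offset by (heap base >> RegionSizeShift),
  // so an entry can be looked up directly as table[(uintx)addr >> RegionSizeShift]
  // without first subtracting the heap base.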
  _in_cset_fast_test_length = _max_regions;
  _in_cset_fast_test_base =
                   NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length, mtGC);
  _in_cset_fast_test = _in_cset_fast_test_base -
               ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::RegionSizeShift);

  _next_top_at_mark_starts_base =
                   NEW_C_HEAP_ARRAY(HeapWord*, _max_regions, mtGC);
  _next_top_at_mark_starts = _next_top_at_mark_starts_base -
               ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::RegionSizeShift);

  _complete_top_at_mark_starts_base =
                   NEW_C_HEAP_ARRAY(HeapWord*, _max_regions, mtGC);
  _complete_top_at_mark_starts = _complete_top_at_mark_starts_base -
               ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::RegionSizeShift);

  size_t i = 0;
  for (i = 0; i < _num_regions; i++) {
    _in_cset_fast_test_base[i] = false; // Not in cset
    HeapWord* bottom = (HeapWord*) pgc_rs.base() + regionSizeWords * i;
    _complete_top_at_mark_starts_base[i] = bottom;
    _next_top_at_mark_starts_base[i] = bottom;
  }

  {
    ShenandoahHeapLock lock(this);
    for (i = 0; i < _num_regions; i++) {
      ShenandoahHeapRegion* current = new ShenandoahHeapRegion(this, (HeapWord*) pgc_rs.base() +
                                                               regionSizeWords * i, regionSizeWords, i);
      _free_regions->add_region(current);
      _ordered_regions->add_region(current);
    }
  }
  assert(((size_t) _ordered_regions->active_regions()) == _num_regions, "");
  _first_region = _ordered_regions->get(0);
  _first_region_bottom = _first_region->bottom();
  assert((((size_t) _first_region_bottom) &
          (ShenandoahHeapRegion::RegionSizeBytes - 1)) == 0,
         "misaligned heap: "PTR_FORMAT, p2i(_first_region_bottom));

  _numAllocs = 0;

  if (log_is_enabled(Trace, gc, region)) {
    ResourceMark rm;
    outputStream* out = Log(gc, region)::trace_stream();
    log_trace(gc, region)("All Regions");
    _ordered_regions->print(out);
    log_trace(gc, region)("Free Regions");
    _free_regions->print(out);
  }

  // The call below uses facilities (the SATB queue set) that live in G1, but
  // probably belong in a shared location.
  JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
                                               SATB_Q_FL_lock,
                                               20 /*G1SATBProcessCompletedThreshold */,
                                               Shared_SATB_Q_lock);

  // Reserve space for the complete (previous) and next mark bitmaps.
  size_t bitmap_size = CMBitMap::compute_size(heap_rs.size());
  MemRegion heap_region = MemRegion((HeapWord*) heap_rs.base(), heap_rs.size() / HeapWordSize);

  size_t page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();

  ReservedSpace bitmap0(bitmap_size, page_size);
  os::commit_memory_or_exit(bitmap0.base(), bitmap0.size(), false, "couldn't allocate mark bitmap");
  MemTracker::record_virtual_memory_type(bitmap0.base(), mtGC);
  MemRegion bitmap_region0 = MemRegion((HeapWord*) bitmap0.base(), bitmap0.size() / HeapWordSize);

  ReservedSpace bitmap1(bitmap_size, page_size);
  os::commit_memory_or_exit(bitmap1.base(), bitmap1.size(), false, "couldn't allocate mark bitmap");
  MemTracker::record_virtual_memory_type(bitmap1.base(), mtGC);
  MemRegion bitmap_region1 = MemRegion((HeapWord*) bitmap1.base(), bitmap1.size() / HeapWordSize);

  if (ShenandoahAlwaysPreTouch) {
    assert (!AlwaysPreTouch, "Should have been overridden");

    // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
    // before initialize() below zeroes it with the initializing thread. For any given region,
    // we touch the region and the corresponding bitmaps from the same thread.

    log_info(gc, heap)("Parallel pretouch " SIZE_FORMAT " regions with " SIZE_FORMAT " byte pages",
                       _ordered_regions->count(), page_size);
    ShenandoahPretouchTask cl(_ordered_regions, bitmap0.base(), bitmap1.base(), bitmap_size, page_size);
    _workers->run_task(&cl);
  }

  _mark_bit_map0.initialize(heap_region, bitmap_region0);
  _complete_mark_bit_map = &_mark_bit_map0;

  _mark_bit_map1.initialize(heap_region, bitmap_region1);
  _next_mark_bit_map = &_mark_bit_map1;

  _connection_matrix = new ShenandoahConnectionMatrix(_max_regions);

  _monitoring_support = new ShenandoahMonitoringSupport(this);

  _concurrent_gc_thread = new ShenandoahConcurrentThread();

  ShenandoahMarkCompact::initialize();

  return JNI_OK;
}

ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
  CollectedHeap(),
  _shenandoah_policy(policy),
  _concurrent_mark_in_progress(0),
  _evacuation_in_progress(0),
  _full_gc_in_progress(false),
  _free_regions(NULL),
  _collection_set(NULL),
  _bytes_allocated_since_cm(0),
  _bytes_allocated_during_cm(0),
  _max_allocated_gc(0),
  _allocated_last_gc(0),
  _used_start_gc(0),
  _max_workers(MAX2(ConcGCThreads, ParallelGCThreads)),
  _ref_processor(NULL),
  _in_cset_fast_test(NULL),
  _in_cset_fast_test_base(NULL),
  _next_top_at_mark_starts(NULL),
  _next_top_at_mark_starts_base(NULL),
  _complete_top_at_mark_starts(NULL),
  _complete_top_at_mark_starts_base(NULL),
  _mark_bit_map0(),
  _mark_bit_map1(),
  _connection_matrix(NULL),
  _cancelled_concgc(false),
  _need_update_refs(false),
  _need_reset_bitmaps(false),
  _heap_lock(0),
#ifdef ASSERT
  _heap_lock_owner(NULL),
#endif
  _gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer())
{
  log_info(gc, init)("Parallel GC threads: "UINT32_FORMAT, ParallelGCThreads);
  log_info(gc, init)("Concurrent GC threads: "UINT32_FORMAT, ConcGCThreads);
  log_info(gc, init)("Parallel reference processing enabled: %s", BOOL_TO_STR(ParallelRefProcEnabled));

  _scm = new ShenandoahConcurrentMark();
  _used = 0;

  _max_workers = MAX2(_max_workers, 1U);
  _workers = new ShenandoahWorkGang("Shenandoah GC Threads", _max_workers,
                            /* are_GC_task_threads */true,
                            /* are_ConcurrentGC_threads */false);
  if (_workers == NULL) {
    vm_exit_during_initialization("Failed necessary allocation.");
  } else {
    _workers->initialize_workers();
  }
}

class ResetNextBitmapTask : public AbstractGangTask {
private:
  ShenandoahHeapRegionSet* _regions;

public:
  ResetNextBitmapTask(ShenandoahHeapRegionSet* regions) :
    AbstractGangTask("Parallel Reset Bitmap Task"),
    _regions(regions) {
    _regions->clear_current_index();
  }

  void work(uint worker_id) {
    ShenandoahHeapRegion* region = _regions->claim_next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    while (region != NULL) {
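      // Marks can only exist below the region's top-at-mark-start (TAMS)
      // watermark, so it suffices to clear the bitmap for [bottom, TAMS).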
      HeapWord* bottom = region->bottom();
      HeapWord* top = heap->next_top_at_mark_start(region->bottom());
      if (top > bottom) {
        heap->next_mark_bit_map()->clear_range_large(MemRegion(bottom, top));
      }
      region = _regions->claim_next();
    }
  }
};

void ShenandoahHeap::reset_next_mark_bitmap(WorkGang* workers) {
  ResetNextBitmapTask task = ResetNextBitmapTask(_ordered_regions);
  workers->run_task(&task);
}

class ResetCompleteBitmapTask : public AbstractGangTask {
private:
  ShenandoahHeapRegionSet* _regions;

public:
  ResetCompleteBitmapTask(ShenandoahHeapRegionSet* regions) :
    AbstractGangTask("Parallel Reset Bitmap Task"),
    _regions(regions) {
    _regions->clear_current_index();
  }

  void work(uint worker_id) {
    ShenandoahHeapRegion* region = _regions->claim_next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    while (region != NULL) {
      HeapWord* bottom = region->bottom();
      HeapWord* top = heap->complete_top_at_mark_start(region->bottom());
      if (top > bottom) {
        heap->complete_mark_bit_map()->clear_range_large(MemRegion(bottom, top));
      }
      region = _regions->claim_next();
    }
  }
};

void ShenandoahHeap::reset_complete_mark_bitmap(WorkGang* workers) {
  ResetCompleteBitmapTask task = ResetCompleteBitmapTask(_ordered_regions);
  workers->run_task(&task);
}
bool ShenandoahHeap::is_next_bitmap_clear() {
  HeapWord* start = _ordered_regions->bottom();
  HeapWord* end = _ordered_regions->end();
  return _next_mark_bit_map->getNextMarkedWordAddress(start, end) == end;
}

bool ShenandoahHeap::is_complete_bitmap_clear_range(HeapWord* start, HeapWord* end) {
  return _complete_mark_bit_map->getNextMarkedWordAddress(start, end) == end;
}

void ShenandoahHeap::print_on(outputStream* st) const {
  st->print("Shenandoah Heap");
  st->print(" total = " SIZE_FORMAT " K, used " SIZE_FORMAT " K ", capacity() / K, used() / K);
  st->print(" [" PTR_FORMAT ", " PTR_FORMAT ") ",
            p2i(reserved_region().start()),
            p2i(reserved_region().end()));
  st->print("Region size = " SIZE_FORMAT "K ", ShenandoahHeapRegion::RegionSizeBytes / K);
  if (_concurrent_mark_in_progress) {
    st->print("marking ");
  }
  if (_evacuation_in_progress) {
    st->print("evacuating ");
  }
  if (cancelled_concgc()) {
    st->print("cancelled ");
  }
  st->print("\n");

  // Adapted from VirtualSpace::print_on(), which is non-PRODUCT only.
  st->print("Virtual space:");
  if (_storage.special()) st->print(" (pinned in memory)");
  st->cr();
  st->print_cr(" - committed: " SIZE_FORMAT, _storage.committed_size());
  st->print_cr(" - reserved:  " SIZE_FORMAT, _storage.reserved_size());
  st->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  p2i(_storage.low()), p2i(_storage.high()));
  st->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  p2i(_storage.low_boundary()), p2i(_storage.high_boundary()));

  if (Verbose) {
    print_heap_regions(st);
  }
}

class InitGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    thread->gclab().initialize(true);
  }
};

void ShenandoahHeap::post_initialize() {
  if (UseTLAB) {
    // This is a very tricky point in the VM lifetime. We cannot easily call
    // Threads::threads_do here, because some system threads (VMThread,
    // WatcherThread, etc) are not yet available. Their initialization should
    // be handled separately. If we miss some threads here, then any other
    // TLAB-related activity would fail with asserts.

    InitGCLABClosure init_gclabs;
    {
      MutexLocker ml(Threads_lock);
      for (JavaThread *thread = Threads::first(); thread != NULL; thread = thread->next()) {
        init_gclabs.do_thread(thread);
      }
    }
    gc_threads_do(&init_gclabs);

    // GCLABs cannot be initialized early during VM startup, because a GCLAB
    // cannot determine its max_size yet. Instead, we let the WorkGang
    // initialize the GCLAB when a new worker is created.
    _workers->set_initialize_gclab();
  }

  _scm->initialize(_max_workers);

  ref_processing_init();
}

class CalculateUsedRegionClosure : public ShenandoahHeapRegionClosure {
  size_t sum;
public:

  CalculateUsedRegionClosure() {
    sum = 0;
  }

  bool doHeapRegion(ShenandoahHeapRegion* r) {
    sum = sum + r->used();
    return false;
  }

  size_t getResult() { return sum; }
};

size_t ShenandoahHeap::calculateUsed() {
  CalculateUsedRegionClosure cl;
  heap_region_iterate(&cl);
  return cl.getResult();
}

void ShenandoahHeap::verify_heap_size_consistency() {
  assert(calculateUsed() == used(),
         "heap used size must be consistent: heap-used: "SIZE_FORMAT" regions-used: "SIZE_FORMAT, used(), calculateUsed());
}

size_t ShenandoahHeap::used() const {
  OrderAccess::acquire();
  return _used;
}

void ShenandoahHeap::increase_used(size_t bytes) {
  assert_heaplock_or_safepoint();
  _used += bytes;
}

void ShenandoahHeap::set_used(size_t bytes) {
  assert_heaplock_or_safepoint();
  _used = bytes;
}

void ShenandoahHeap::decrease_used(size_t bytes) {
  assert_heaplock_or_safepoint();
  assert(_used >= bytes, "never decrease heap size by more than we've left");
  _used -= bytes;
}

size_t ShenandoahHeap::capacity() const {
  return _num_regions * ShenandoahHeapRegion::RegionSizeBytes;
}

bool ShenandoahHeap::is_maximal_no_gc() const {
  Unimplemented();
  return true;
}

size_t ShenandoahHeap::max_capacity() const {
  return _max_regions * ShenandoahHeapRegion::RegionSizeBytes;
}

size_t ShenandoahHeap::min_capacity() const {
  return _initialSize;
}

VirtualSpace* ShenandoahHeap::storage() const {
  return (VirtualSpace*) &_storage;
}

bool ShenandoahHeap::is_in(const void* p) const {
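  // The heap is a single contiguous range of _num_regions equally-sized
  // regions starting at the first region's bottom, so a simple range check
  // suffices here.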
  HeapWord* first_region_bottom = _first_region->bottom();
  HeapWord* last_region_end = first_region_bottom + (ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize) * _num_regions;
  return p >= _first_region_bottom && p < last_region_end;
}

bool ShenandoahHeap::is_scavengable(const void* p) {
  return true;
}

HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
  // Retain tlab and allocate object in shared space if
  // the amount free in the tlab is too large to discard.
  if (thread->gclab().free() > thread->gclab().refill_waste_limit()) {
    thread->gclab().record_slow_allocation(size);
    return NULL;
  }

  // Discard gclab and allocate a new one.
  // To minimize fragmentation, the last GCLAB may be smaller than the rest.
  size_t new_gclab_size = thread->gclab().compute_size(size);

  thread->gclab().clear_before_allocation();

  if (new_gclab_size == 0) {
    return NULL;
  }

  // Allocate a new GCLAB...
  HeapWord* obj = allocate_new_gclab(new_gclab_size);
  if (obj == NULL) {
    return NULL;
  }

  if (ZeroTLAB) {
    // ..and clear it.
    Copy::zero_to_words(obj, new_gclab_size);
  } else {
    // ...and zap just allocated object.
#ifdef ASSERT
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(obj + hdr_size, new_gclab_size - hdr_size, badHeapWordVal);
#endif // ASSERT
  }
  thread->gclab().fill(obj, obj + size, new_gclab_size);
  return obj;
}

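// Both TLAB and GCLAB refills funnel into the same allocation path below; the
// boolean flag distinguishes GC-internal (evacuating) GCLAB refills from
// mutator TLAB refills, see allocate_memory().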
HeapWord* ShenandoahHeap::allocate_new_tlab(size_t word_size) {
  return allocate_new_tlab(word_size, false);
}

HeapWord* ShenandoahHeap::allocate_new_gclab(size_t word_size) {
  return allocate_new_tlab(word_size, true);
}

HeapWord* ShenandoahHeap::allocate_new_tlab(size_t word_size, bool evacuating) {
  HeapWord* result = allocate_memory(word_size, evacuating);

  if (result != NULL) {
    assert(! in_collection_set(result), "Never allocate in dirty region");
    _bytes_allocated_since_cm += word_size * HeapWordSize;

    log_develop_trace(gc, tlab)("allocating new tlab of size "SIZE_FORMAT" at addr "PTR_FORMAT, word_size, p2i(result));
  }
  return result;
}

ShenandoahHeap* ShenandoahHeap::heap() {
  CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to ShenandoahHeap::heap()");
  assert(heap->kind() == CollectedHeap::ShenandoahHeap, "not a shenandoah heap");
  return (ShenandoahHeap*) heap;
}

ShenandoahHeap* ShenandoahHeap::heap_no_check() {
  CollectedHeap* heap = Universe::heap();
  return (ShenandoahHeap*) heap;
}

HeapWord* ShenandoahHeap::allocate_memory_work(size_t word_size) {
  ShenandoahHeapLock heap_lock(this);

  HeapWord* result = allocate_memory_under_lock(word_size);
  size_t grow_by = (word_size * HeapWordSize + ShenandoahHeapRegion::RegionSizeBytes - 1) / ShenandoahHeapRegion::RegionSizeBytes;

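  // If the allocation failed, grow the committed heap by enough regions to
  // fit the request and retry, until the maximum heap size is reached.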
  while (result == NULL && _num_regions + grow_by <= _max_regions) {
    grow_heap_by(grow_by);
    result = allocate_memory_under_lock(word_size);
  }

  return result;
}

HeapWord* ShenandoahHeap::allocate_memory(size_t word_size, bool evacuating) {
  HeapWord* result = NULL;
  result = allocate_memory_work(word_size);

  if (!evacuating) {
    // Allocation failed, try full-GC, then retry allocation.
    //
    // It might happen that one of the threads requesting allocation would unblock
    // way later after full-GC happened, only to fail the second allocation, because
    // other threads have already depleted the free storage. In this case, a better
    // strategy would be to try full-GC again.
    //
    // Lacking the way to detect progress from "collect" call, we are left with blindly
    // retrying for some bounded number of times.
    // TODO: Poll if Full GC made enough progress to warrant retry.
    int tries = 0;
    while ((result == NULL) && (tries++ < ShenandoahFullGCTries)) {
      log_debug(gc)("[" PTR_FORMAT "] Failed to allocate " SIZE_FORMAT " bytes, doing full GC, try %d",
                    p2i(Thread::current()), word_size * HeapWordSize, tries);
      collect(GCCause::_allocation_failure);
      result = allocate_memory_work(word_size);
    }
  }

  // Only update monitoring counters when not calling from a write-barrier.
  // Otherwise we might attempt to grab the Service_lock, which we must
  // not do when coming from a write-barrier (because the thread might
  // already hold the Compile_lock).
  if (! evacuating) {
    monitoring_support()->update_counters();
  }

  log_develop_trace(gc, alloc)("allocate memory chunk of size "SIZE_FORMAT" at addr "PTR_FORMAT " by thread %d ",
                               word_size, p2i(result), Thread::current()->osthread()->thread_id());

  return result;
}

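// An evacuating allocation issued by a Java thread can only originate from a
// write-barrier slow-path; evacuating allocations from GC workers come from
// non-Java threads.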
bool ShenandoahHeap::call_from_write_barrier(bool evacuating) {
  return evacuating && Thread::current()->is_Java_thread();
}

HeapWord* ShenandoahHeap::allocate_memory_under_lock(size_t word_size) {
  assert_heaplock_owned_by_current_thread();

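  // Allocations larger than a single region cannot be satisfied from the
  // regular free list and are routed to the humongous allocator.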
  if (word_size * HeapWordSize > ShenandoahHeapRegion::RegionSizeBytes) {
    return allocate_large_memory(word_size);
  }

  // Not enough memory in the free region set.
  // Coming out of full GC, it is possible that there is no free region
  // available, so current_index may not be valid.
  if (word_size * HeapWordSize > _free_regions->capacity()) return NULL;

  ShenandoahHeapRegion* my_current_region = _free_regions->current_no_humongous();

  if (my_current_region == NULL) {
    return NULL; // No more room to make a new region. OOM.
  }
  assert(my_current_region != NULL, "should have a region at this point");

#ifdef ASSERT
  if (in_collection_set(my_current_region)) {
    print_heap_regions();
  }
#endif
  assert(! in_collection_set(my_current_region), "never get targeted regions in free-lists");
  assert(! my_current_region->is_humongous(), "never attempt to allocate from humongous object regions");

  HeapWord* result = my_current_region->allocate(word_size);

  while (result == NULL) {
    // 2nd attempt. Try next region.
    _free_regions->increase_used(my_current_region->free());
    ShenandoahHeapRegion* next_region = _free_regions->next_no_humongous();
    assert(next_region != my_current_region, "must not get current again");
    my_current_region = next_region;

    if (my_current_region == NULL) {
      return NULL; // No more room to make a new region. OOM.
    }
    assert(my_current_region != NULL, "should have a region at this point");
    assert(! in_collection_set(my_current_region), "never get targeted regions in free-lists");
    assert(! my_current_region->is_humongous(), "never attempt to allocate from humongous object regions");
    result = my_current_region->allocate(word_size);
  }

  my_current_region->increase_live_data_words(word_size);
  increase_used(word_size * HeapWordSize);
  _free_regions->increase_used(word_size * HeapWordSize);
  return result;
}

HeapWord* ShenandoahHeap::allocate_large_memory(size_t words) {
  assert_heaplock_owned_by_current_thread();

  uint required_regions = ShenandoahHumongous::required_regions(words * HeapWordSize);
  if (required_regions > _max_regions) return NULL;

  ShenandoahHeapRegion* r = _free_regions->allocate_contiguous(required_regions);

  HeapWord* result = NULL;

  if (r != NULL)  {
    result = r->bottom();

    log_debug(gc, humongous)("allocating humongous object of size: "SIZE_FORMAT" KB at location "PTR_FORMAT" in start region "SIZE_FORMAT,
                             (words * HeapWordSize) / K, p2i(result), r->region_number());
  } else {
    log_debug(gc, humongous)("allocating humongous object of size: "SIZE_FORMAT" KB at location "PTR_FORMAT" failed",
                             (words * HeapWordSize) / K, p2i(result));
  }

  return result;
}

HeapWord* ShenandoahHeap::mem_allocate(size_t size,
                                       bool* gc_overhead_limit_was_exceeded) {
#ifdef ASSERT
  if (ShenandoahVerify && _numAllocs > 1000000) {
    _numAllocs = 0;
  }
  _numAllocs++;
#endif
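  // Each Java object is preceded by a one-word Brooks forwarding pointer.
  // Allocate room for the object plus that word, then return the address
  // just past the forwarding-pointer word as the object start.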
  HeapWord* filler = allocate_memory(BrooksPointer::word_size() + size, false);
  if (filler != NULL) {
    HeapWord* result = filler + BrooksPointer::word_size();
    BrooksPointer::initialize(oop(result));
    _bytes_allocated_since_cm += size * HeapWordSize;

    assert(! in_collection_set(result), "never allocate in targeted region");
    return result;
  } else {
    /*
    tty->print_cr("Out of memory. Requested number of words: "SIZE_FORMAT" used heap: "INT64_FORMAT", bytes allocated since last CM: "INT64_FORMAT,
                  size, used(), _bytes_allocated_since_cm);
    {
      print_heap_regions();
      tty->print("Printing "SIZE_FORMAT" free regions:\n", _free_regions->count());
      _free_regions->print();
    }
    */
    return NULL;
  }
}

class ParallelEvacuateRegionObjectClosure : public ObjectClosure {
private:
  ShenandoahHeap* _heap;
  Thread* _thread;
public:
  ParallelEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
    _heap(heap), _thread(Thread::current()) {
  }

  void do_object(oop p) {
    log_develop_trace(gc, compaction)("Calling ParallelEvacuateRegionObjectClosure on "PTR_FORMAT" of size %d",
                                      p2i((HeapWord*) p), p->size());

    assert(_heap->is_marked_complete(p), "expect only marked objects");
    if (oopDesc::unsafe_equals(p, ShenandoahBarrierSet::resolve_oop_static_not_null(p))) {
      _heap->evacuate_object(p, _thread);
    }
  }
};

#ifdef ASSERT
class VerifyEvacuatedObjectClosure : public ObjectClosure {
public:
  void do_object(oop p) {
    if (ShenandoahHeap::heap()->is_marked_complete(p)) {
      oop p_prime = oopDesc::bs()->read_barrier(p);
      assert(! oopDesc::unsafe_equals(p, p_prime), "Should point to evacuated copy");
      if (p->klass() != p_prime->klass()) {
        tty->print_cr("copy has different class than original:");
        p->klass()->print_on(tty);
        p_prime->klass()->print_on(tty);
      }
      assert(p->klass() == p_prime->klass(), "Should have the same class p: "PTR_FORMAT", p_prime: "PTR_FORMAT, p2i(p), p2i(p_prime));
      //      assert(p->mark() == p_prime->mark(), "Should have the same mark");
      assert(p->size() == p_prime->size(), "Should be the same size");
      assert(oopDesc::unsafe_equals(p_prime, oopDesc::bs()->read_barrier(p_prime)), "One forward once");
    }
  }
};

void ShenandoahHeap::verify_evacuated_region(ShenandoahHeapRegion* from_region) {
  VerifyEvacuatedObjectClosure verify_evacuation;
  marked_object_iterate(from_region, &verify_evacuation);
}
#endif

void ShenandoahHeap::parallel_evacuate_region(ShenandoahHeapRegion* from_region) {
  assert(from_region->has_live(), "all-garbage regions are reclaimed earlier");

  ParallelEvacuateRegionObjectClosure evacuate_region(this);

  marked_object_iterate(from_region, &evacuate_region);

#ifdef ASSERT
  if (ShenandoahVerify && ! cancelled_concgc()) {
    verify_evacuated_region(from_region);
  }
#endif
}

class ParallelEvacuationTask : public AbstractGangTask {
private:
  ShenandoahHeap* _sh;
  ShenandoahCollectionSet* _cs;

public:
  ParallelEvacuationTask(ShenandoahHeap* sh,
                         ShenandoahCollectionSet* cs) :
    AbstractGangTask("Parallel Evacuation Task"),
    _sh(sh),
    _cs(cs) {}

  void work(uint worker_id) {
    ShenandoahHeapRegion* from_hr = _cs->claim_next();

    while (from_hr != NULL) {
      log_develop_trace(gc, region)("Thread "INT32_FORMAT" claimed Heap Region "SIZE_FORMAT,
                                    worker_id,
                                    from_hr->region_number());

      assert(from_hr->has_live(), "all-garbage regions are reclaimed early");
      _sh->parallel_evacuate_region(from_hr);

      if (_sh->cancelled_concgc()) {
        log_develop_trace(gc, region)("Cancelled concgc while evacuating region " SIZE_FORMAT, from_hr->region_number());
        break;
      }
      from_hr = _cs->claim_next();
    }
  }
};

class RecycleDirtyRegionsClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* _heap;
  size_t _bytes_reclaimed;
public:
  RecycleDirtyRegionsClosure() : _heap(ShenandoahHeap::heap()), _bytes_reclaimed(0) {}

  bool doHeapRegion(ShenandoahHeapRegion* r) {
    assert (! _heap->cancelled_concgc(), "no recycling after cancelled marking");

    if (_heap->in_collection_set(r)) {
      log_develop_trace(gc, region)("Recycling region " SIZE_FORMAT ":", r->region_number());
      _heap->decrease_used(r->used());
      _bytes_reclaimed += r->used();
      r->recycle();
    }

    return false;
  }
  size_t bytes_reclaimed() { return _bytes_reclaimed; }
  void clear_bytes_reclaimed() { _bytes_reclaimed = 0; }
};

void ShenandoahHeap::recycle_dirty_regions() {
  RecycleDirtyRegionsClosure cl;
  cl.clear_bytes_reclaimed();

  heap_region_iterate(&cl);

  _shenandoah_policy->record_bytes_reclaimed(cl.bytes_reclaimed());
  if (! cancelled_concgc()) {
    clear_cset_fast_test();
  }
}

ShenandoahFreeSet* ShenandoahHeap::free_regions() {
  return _free_regions;
}

void ShenandoahHeap::print_heap_regions(outputStream* st) const {
  _ordered_regions->print(st);
}

class PrintAllRefsOopClosure: public ExtendedOopClosure {
private:
  int _index;
  const char* _prefix;

public:
  PrintAllRefsOopClosure(const char* prefix) : _index(0), _prefix(prefix) {}

private:
  template <class T>
  inline void do_oop_work(T* p) {
    oop o = oopDesc::load_decode_heap_oop(p);
    if (o != NULL) {
      if (ShenandoahHeap::heap()->is_in(o) && o->is_oop()) {
        tty->print_cr("%s "INT32_FORMAT" ("PTR_FORMAT") -> "PTR_FORMAT" (marked: %s) (%s "PTR_FORMAT")",
                      _prefix, _index,
                      p2i(p), p2i(o),
                      BOOL_TO_STR(ShenandoahHeap::heap()->is_marked_complete(o)),
                      o->klass()->internal_name(), p2i(o->klass()));
      } else {
        tty->print_cr("%s "INT32_FORMAT" ("PTR_FORMAT") dirty -> "PTR_FORMAT" (not in heap, possibly corrupted or dirty)",
                      _prefix, _index,
                      p2i(p), p2i(o));
      }
    } else {
      tty->print_cr("%s "INT32_FORMAT" ("PTR_FORMAT") -> "PTR_FORMAT, _prefix, _index, p2i(p), p2i((HeapWord*) o));
    }
    _index++;
  }

public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }

  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

class PrintAllRefsObjectClosure : public ObjectClosure {
  const char* _prefix;

public:
  PrintAllRefsObjectClosure(const char* prefix) : _prefix(prefix) {}

  void do_object(oop p) {
    if (ShenandoahHeap::heap()->is_in(p)) {
      tty->print_cr("%s object "PTR_FORMAT" (marked: %s) (%s "PTR_FORMAT") refers to:",
                    _prefix, p2i(p),
                    BOOL_TO_STR(ShenandoahHeap::heap()->is_marked_complete(p)),
                    p->klass()->internal_name(), p2i(p->klass()));
      PrintAllRefsOopClosure cl(_prefix);
      p->oop_iterate(&cl);
    }
  }
};

void ShenandoahHeap::print_all_refs(const char* prefix) {
  tty->print_cr("printing all references in the heap");
  tty->print_cr("root references:");

  ensure_parsability(false);

  PrintAllRefsOopClosure cl(prefix);
  roots_iterate(&cl);

  tty->print_cr("heap references:");
  PrintAllRefsObjectClosure cl2(prefix);
  object_iterate(&cl2);
}

class VerifyAfterMarkingOopClosure: public ExtendedOopClosure {
private:
  ShenandoahHeap* _heap;

public:
  VerifyAfterMarkingOopClosure() :
    _heap(ShenandoahHeap::heap()) { }

private:
  template <class T>
  inline void do_oop_work(T* p) {
    oop o = oopDesc::load_decode_heap_oop(p);
    if (o != NULL) {
      if (! _heap->is_marked_complete(o)) {
        _heap->print_heap_regions();
        _heap->print_all_refs("post-mark");
        tty->print_cr("oop not marked, although referrer is marked: "PTR_FORMAT": in_heap: %s, is_marked: %s",
                      p2i((HeapWord*) o), BOOL_TO_STR(_heap->is_in(o)), BOOL_TO_STR(_heap->is_marked_complete(o)));
        _heap->print_heap_locations((HeapWord*) o, (HeapWord*) o + o->size());

        tty->print_cr("oop class: %s", o->klass()->internal_name());
        if (_heap->is_in(p)) {
          oop referrer = oop(_heap->heap_region_containing(p)->block_start_const(p));
          tty->print_cr("Referrer starts at addr "PTR_FORMAT, p2i((HeapWord*) referrer));
          referrer->print();
          _heap->print_heap_locations((HeapWord*) referrer, (HeapWord*) referrer + referrer->size());
        }
        tty->print_cr("heap region containing object:");
        _heap->heap_region_containing(o)->print();
        tty->print_cr("heap region containing referrer:");
        _heap->heap_region_containing(p)->print();
        tty->print_cr("heap region containing forwardee:");
        _heap->heap_region_containing(oopDesc::bs()->read_barrier(o))->print();
      }
      assert(o->is_oop(), "oop must be an oop");
      assert(Metaspace::contains(o->klass()), "klass pointer must go to metaspace");
      if (! oopDesc::unsafe_equals(o, oopDesc::bs()->read_barrier(o))) {
        tty->print_cr("oop has forwardee: p: "PTR_FORMAT" (%s), o = "PTR_FORMAT" (%s), new-o: "PTR_FORMAT" (%s)",
                      p2i(p),
                      BOOL_TO_STR(_heap->in_collection_set(p)),
                      p2i(o),
                      BOOL_TO_STR(_heap->in_collection_set(o)),
                      p2i((HeapWord*) oopDesc::bs()->read_barrier(o)),
                      BOOL_TO_STR(_heap->in_collection_set(oopDesc::bs()->read_barrier(o))));
        tty->print_cr("oop class: %s", o->klass()->internal_name());
      }
      assert(oopDesc::unsafe_equals(o, oopDesc::bs()->read_barrier(o)), "oops must not be forwarded");
      assert(! _heap->in_collection_set(o), "references must not point to dirty heap regions");
      assert(_heap->is_marked_complete(o), "live oops must be marked current");
    }
  }

public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }

  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

void ShenandoahHeap::verify_heap_after_marking() {
  verify_heap_size_consistency();

  log_trace(gc)("verifying heap after marking");

  VerifyAfterMarkingOopClosure cl;
  roots_iterate(&cl);
  ObjectToOopClosure objs(&cl);
  object_iterate(&objs);
}

void ShenandoahHeap::reclaim_humongous_region_at(ShenandoahHeapRegion* r) {
  assert(r->is_humongous_start(), "reclaim regions starting with the first one");

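  // The humongous object starts one Brooks-pointer word past the region
  // bottom; its total footprint, including that word, spans required_regions
  // consecutive regions.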
  oop humongous_obj = oop(r->bottom() + BrooksPointer::word_size());
  size_t size = humongous_obj->size() + BrooksPointer::word_size();
  uint required_regions = ShenandoahHumongous::required_regions(size * HeapWordSize);
  uint index = r->region_number();

  assert(!r->has_live(), "liveness must be zero");

  for (size_t i = 0; i < required_regions; i++) {
    ShenandoahHeapRegion* region = _ordered_regions->get(index++);

    assert((region->is_humongous_start() || region->is_humongous_continuation()),
           "expect correct humongous start or continuation");

    if (log_is_enabled(Debug, gc, humongous)) {
      log_debug(gc, humongous)("reclaiming "UINT32_FORMAT" humongous regions for object of size: "SIZE_FORMAT" words", required_regions, size);
      ResourceMark rm;
      outputStream* out = Log(gc, humongous)::debug_stream();
      region->print_on(out);
    }

    region->recycle();
    ShenandoahHeap::heap()->decrease_used(ShenandoahHeapRegion::RegionSizeBytes);
  }
}

class ShenandoahReclaimHumongousRegionsClosure : public ShenandoahHeapRegionClosure {

  bool doHeapRegion(ShenandoahHeapRegion* r) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();

    if (r->is_humongous_start()) {
      oop humongous_obj = oop(r->bottom() + BrooksPointer::word_size());
      if (! heap->is_marked_complete(humongous_obj)) {
        heap->reclaim_humongous_region_at(r);
      }
    }
    return false;
  }
};

#ifdef ASSERT
class CheckCollectionSetClosure: public ShenandoahHeapRegionClosure {
  bool doHeapRegion(ShenandoahHeapRegion* r) {
    assert(! ShenandoahHeap::heap()->in_collection_set(r), "Should have been cleared by now");
    return false;
  }
};
#endif

void ShenandoahHeap::prepare_for_concurrent_evacuation() {
  assert(_ordered_regions->get(0)->region_number() == 0, "FIXME CHF. FIXME CHF!");

  log_develop_trace(gc)("Thread %d started prepare_for_concurrent_evacuation", Thread::current()->osthread()->thread_id());

  if (!cancelled_concgc()) {
    recycle_dirty_regions();

    ensure_parsability(true);

#ifdef ASSERT
    if (ShenandoahVerify) {
      verify_heap_after_marking();
    }

    if (UseShenandoahMatrix) {
      if (PrintShenandoahMatrix) {
        connection_matrix()->print_on(tty);
      }
      if (VerifyShenandoahMatrix) {
        verify_matrix();
      }
    }
#endif

    // NOTE: This needs to be done during a stop-the-world pause, because
    // putting regions into the collection set concurrently with Java threads
    // will create a race. In particular, acmp could fail because when we
    // resolve the first operand, the containing region might not yet be in
    // the collection set, and thus return the original oop. When the 2nd
    // operand gets resolved, the region could be in the collection set
    // and the oop gets evacuated. If both operands have originally been
    // the same, we get false negatives.

    {
      ShenandoahHeapLock lock(this);
      _collection_set->clear();
      _free_regions->clear();

      ShenandoahReclaimHumongousRegionsClosure reclaim;
      heap_region_iterate(&reclaim);

#ifdef ASSERT
      CheckCollectionSetClosure ccsc;
      _ordered_regions->heap_region_iterate(&ccsc);
#endif

      _shenandoah_policy->choose_collection_set(_collection_set);

      _shenandoah_policy->choose_free_set(_free_regions);
    }

    _bytes_allocated_since_cm = 0;

    Universe::update_heap_info_at_gc();
  }
}

class RetireTLABClosure : public ThreadClosure {
private:
  bool _retire;

public:
  RetireTLABClosure(bool retire) : _retire(retire) {
  }

  void do_thread(Thread* thread) {
    thread->gclab().make_parsable(_retire);
  }
};

void ShenandoahHeap::ensure_parsability(bool retire_tlabs) {
  if (UseTLAB) {
    CollectedHeap::ensure_parsability(retire_tlabs);
    RetireTLABClosure cl(retire_tlabs);
    Threads::threads_do(&cl);
  }
}

class ShenandoahEvacuateUpdateRootsClosure: public ExtendedOopClosure {
private:
  ShenandoahHeap* _heap;
  Thread* _thread;
public:
  ShenandoahEvacuateUpdateRootsClosure() :
    _heap(ShenandoahHeap::heap()), _thread(Thread::current()) {
  }

private:
  template <class T>
  void do_oop_work(T* p) {
    assert(_heap->is_evacuation_in_progress(), "Only do this when evacuation is in progress");

    T o = oopDesc::load_heap_oop(p);
    if (! oopDesc::is_null(o)) {
      oop obj = oopDesc::decode_heap_oop_not_null(o);
      if (_heap->in_collection_set(obj)) {
        assert(_heap->is_marked_complete(obj), "only evacuate marked objects %d %d",
               _heap->is_marked_complete(obj), _heap->is_marked_complete(ShenandoahBarrierSet::resolve_oop_static_not_null(obj)));
        oop resolved = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
        if (oopDesc::unsafe_equals(resolved, obj)) {
          resolved = _heap->evacuate_object(obj, _thread);
        }
        oopDesc::encode_store_heap_oop(p, resolved);
      }
    }
#ifdef ASSERT
    else {
      // tty->print_cr("not updating root at: "PTR_FORMAT" with object: "PTR_FORMAT", is_in_heap: %s, is_in_cset: %s, is_marked: %s",
      //               p2i(p),
      //               p2i((HeapWord*) obj),
      //               BOOL_TO_STR(_heap->is_in(obj)),
      //               BOOL_TO_STR(_heap->in_cset_fast_test(obj)),
      //               BOOL_TO_STR(_heap->is_marked_complete(obj)));
    }
#endif
  }

public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }
  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

class ShenandoahEvacuateUpdateRootsTask : public AbstractGangTask {
  ShenandoahRootEvacuator* _rp;
public:

  ShenandoahEvacuateUpdateRootsTask(ShenandoahRootEvacuator* rp) :
    AbstractGangTask("Shenandoah evacuate and update roots"),
    _rp(rp)
  {
    // Nothing else to do.
  }

  void work(uint worker_id) {
    ShenandoahEvacuateUpdateRootsClosure cl;
    MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);

    _rp->process_evacuate_roots(&cl, &blobsCl, worker_id);
  }
};

class ShenandoahFixRootsTask : public AbstractGangTask {
  ShenandoahRootEvacuator* _rp;
public:

  ShenandoahFixRootsTask(ShenandoahRootEvacuator* rp) :
    AbstractGangTask("Shenandoah update roots"),
    _rp(rp)
  {
    // Nothing else to do.
  }

  void work(uint worker_id) {
    SCMUpdateRefsClosure cl;
    MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);

    _rp->process_evacuate_roots(&cl, &blobsCl, worker_id);
  }
};

void ShenandoahHeap::evacuate_and_update_roots() {

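  // Compiled code may carry "derived" pointers (interior pointers computed
  // from a base oop). Clear the table before moving roots and update it
  // afterwards, so derived pointers are re-adjusted relative to their
  // (possibly moved) base oops.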
  COMPILER2_PRESENT(DerivedPointerTable::clear());

#ifdef ASSERT
  if (ShenandoahVerifyReadsToFromSpace) {
    set_from_region_protection(false);
  }
#endif

  assert(SafepointSynchronize::is_at_safepoint(), "Only iterate roots while world is stopped");
  ClassLoaderDataGraph::clear_claimed_marks();

  {
    ShenandoahRootEvacuator rp(this, workers()->active_workers(), ShenandoahCollectorPolicy::evac_thread_roots);
    ShenandoahEvacuateUpdateRootsTask roots_task(&rp);
    workers()->run_task(&roots_task);
  }

  COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

  if (cancelled_concgc()) {
    // If initial evacuation has been cancelled, we need to update all references
    // after all workers have finished. Otherwise we might run into the following problem:
    // GC thread 1 cannot allocate anymore, thus evacuation fails, leaves from-space ptr of object X.
    // GC thread 2 evacuates the same object X to to-space,
    // which leaves a truly dangling from-space reference in the first root oop*. This must not happen.
    // clear() and update_pointers() must always be called in pairs,
    // cannot nest with above clear()/update_pointers().
    COMPILER2_PRESENT(DerivedPointerTable::clear());
    ShenandoahRootEvacuator rp(this, workers()->active_workers(), ShenandoahCollectorPolicy::evac_thread_roots);
    ShenandoahFixRootsTask update_roots_task(&rp);
    workers()->run_task(&update_roots_task);
    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
  }

#ifdef ASSERT
  if (ShenandoahVerifyReadsToFromSpace) {
    set_from_region_protection(true);
  }
#endif

#ifdef ASSERT
  {
    AssertToSpaceClosure cl;
    CodeBlobToOopClosure code_cl(&cl, !CodeBlobToOopClosure::FixRelocations);
    ShenandoahRootEvacuator rp(this, 1);
    rp.process_evacuate_roots(&cl, &code_cl, 0);
  }
#endif
}

void ShenandoahHeap::do_evacuation() {
  parallel_evacuate();

  if (ShenandoahVerify && ! cancelled_concgc()) {
    VM_ShenandoahVerifyHeapAfterEvacuation verify_after_evacuation;
    if (Thread::current()->is_VM_thread()) {
      verify_after_evacuation.doit();
    } else {
      VMThread::execute(&verify_after_evacuation);
    }
  }
}

void ShenandoahHeap::parallel_evacuate() {
  log_develop_trace(gc)("starting parallel_evacuate");

  _shenandoah_policy->record_phase_start(ShenandoahCollectorPolicy::conc_evac);

  if (log_is_enabled(Trace, gc, region)) {
    ResourceMark rm;
    outputStream *out = Log(gc, region)::trace_stream();
    out->print("Printing all available regions");
    print_heap_regions(out);
  }

  if (log_is_enabled(Trace, gc, cset)) {
    ResourceMark rm;
    outputStream *out = Log(gc, cset)::trace_stream();
    out->print("Printing collection set which contains "SIZE_FORMAT" regions:\n", _collection_set->count());
    _collection_set->print(out);

    out->print("Printing free set which contains "SIZE_FORMAT" regions:\n", _free_regions->count());
    _free_regions->print(out);
  }

  ParallelEvacuationTask evacuationTask = ParallelEvacuationTask(this, _collection_set);

  workers()->run_task(&evacuationTask);

  if (log_is_enabled(Trace, gc, cset)) {
    ResourceMark rm;
    outputStream *out = Log(gc, cset)::trace_stream();
    out->print("Printing postgc collection set which contains "SIZE_FORMAT" regions:\n",
               _collection_set->count());
    _collection_set->print(out);

    out->print("Printing postgc free regions which contain "SIZE_FORMAT" free regions:\n",
               _free_regions->count());
    _free_regions->print(out);
  }

  if (log_is_enabled(Trace, gc, region)) {
    ResourceMark rm;
    outputStream *out = Log(gc, region)::trace_stream();
    out->print_cr("all regions after evacuation:");
    print_heap_regions(out);
  }

  _shenandoah_policy->record_phase_end(ShenandoahCollectorPolicy::conc_evac);
}

class VerifyEvacuationClosure: public ExtendedOopClosure {
private:
  ShenandoahHeap* _heap;
  ShenandoahHeapRegion* _from_region;

public:
  VerifyEvacuationClosure(ShenandoahHeapRegion* from_region) :
    _heap(ShenandoahHeap::heap()), _from_region(from_region) { }

private:
  template <class T>
  inline void do_oop_work(T* p) {
    oop heap_oop = oopDesc::load_decode_heap_oop(p);
    if (! oopDesc::is_null(heap_oop)) {
      guarantee(! _from_region->is_in(heap_oop), "no references to from-region allowed after evacuation: "PTR_FORMAT, p2i((HeapWord*) heap_oop));
    }
  }

public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }

  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

void ShenandoahHeap::roots_iterate(OopClosure* cl) {
  assert(SafepointSynchronize::is_at_safepoint(), "Only iterate roots while world is stopped");

  CodeBlobToOopClosure blobsCl(cl, false);
  CLDToOopClosure cldCl(cl);

  ClassLoaderDataGraph::clear_claimed_marks();

  ShenandoahRootProcessor rp(this, 1);
  rp.process_all_roots(cl, NULL, &cldCl, &blobsCl, 0);
}

void ShenandoahHeap::verify_evacuation(ShenandoahHeapRegion* from_region) {
  VerifyEvacuationClosure rootsCl(from_region);
  roots_iterate(&rootsCl);
}
1458 
1459 bool ShenandoahHeap::supports_tlab_allocation() const {
1460   return true;
1461 }
1462 
1463 
1464 size_t  ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
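  // Estimate how much a fresh TLAB can get: the remainder of the current
  // allocation region if it is still useful, otherwise a full region, since
  // the next TLAB will be carved from the next free region.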
1465   size_t idx = _free_regions->current_index();
1466   ShenandoahHeapRegion* current = _free_regions->get(idx);
1467   if (current == NULL) {
1468     return 0;
1469   } else if (current->free() > MinTLABSize) {
1470     // Current region has enough space left, can use it.
1471     return current->free();
1472   } else {
1473     // No more space in current region, we will take next free region
1474     // on the next TLAB allocation.
1475     return ShenandoahHeapRegion::RegionSizeBytes;
1476   }
1477 }
1478 
1479 size_t ShenandoahHeap::max_tlab_size() const {
1480   return ShenandoahHeapRegion::RegionSizeBytes;
1481 }
1482 
1483 class ResizeGCLABClosure : public ThreadClosure {
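  // Recompute each thread's desired GCLAB size from its allocation statistics.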
1484 public:
1485   void do_thread(Thread* thread) {
1486     thread->gclab().resize();
1487   }
1488 };
1489 
1490 void ShenandoahHeap::resize_all_tlabs() {
1491   CollectedHeap::resize_all_tlabs();
1492 
1493   ResizeGCLABClosure cl;
1494   Threads::threads_do(&cl);
1495 }
1496 
1497 class AccumulateStatisticsGCLABClosure : public ThreadClosure {
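  // Fold each thread's GCLAB statistics into the global totals and reset them
  // for the next cycle.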
1498 public:
1499   void do_thread(Thread* thread) {
1500     thread->gclab().accumulate_statistics();
1501     thread->gclab().initialize_statistics();
1502   }
1503 };
1504 
1505 void ShenandoahHeap::accumulate_statistics_all_gclabs() {
1506   AccumulateStatisticsGCLABClosure cl;
1507   Threads::threads_do(&cl);
1508 }
1509 
bool ShenandoahHeap::can_elide_tlab_store_barriers() const {
1511   return true;
1512 }
1513 
1514 oop ShenandoahHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) {
1515   // Overridden to do nothing.
1516   return new_obj;
1517 }
1518 
bool ShenandoahHeap::can_elide_initializing_store_barrier(oop new_obj) {
1520   return true;
1521 }
1522 
1523 bool ShenandoahHeap::card_mark_must_follow_store() const {
1524   return false;
1525 }
1526 
1527 void ShenandoahHeap::collect(GCCause::Cause cause) {
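  // Both explicit (user-requested) GCs and allocation failures are served by
  // a full GC scheduled on the concurrent GC thread.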
1528   assert(cause != GCCause::_gc_locker, "no JNI critical callback");
1529   if (GCCause::is_user_requested_gc(cause)) {
1530     if (! DisableExplicitGC) {
1531       _concurrent_gc_thread->do_full_gc(cause);
1532     }
1533   } else if (cause == GCCause::_allocation_failure) {
1534     collector_policy()->set_should_clear_all_soft_refs(true);
1535     _concurrent_gc_thread->do_full_gc(cause);
1536   }
1537 }
1538 
void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {
  // Intentionally a no-op: full GCs are scheduled through collect() and
  // carried out by the concurrent GC thread.
}
1542 
AdaptiveSizePolicy* ShenandoahHeap::size_policy() {
  Unimplemented();
  return NULL;
}
1548 
1549 CollectorPolicy* ShenandoahHeap::collector_policy() const {
1550   return _shenandoah_policy;
1551 }
1552 
1553 
1554 HeapWord* ShenandoahHeap::block_start(const void* addr) const {
1555   Space* sp = heap_region_containing(addr);
1556   if (sp != NULL) {
1557     return sp->block_start(addr);
1558   }
1559   return NULL;
1560 }
1561 
1562 size_t ShenandoahHeap::block_size(const HeapWord* addr) const {
1563   Space* sp = heap_region_containing(addr);
1564   assert(sp != NULL, "block_size of address outside of heap");
1565   return sp->block_size(addr);
1566 }
1567 
1568 bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
1569   Space* sp = heap_region_containing(addr);
1570   return sp->block_is_obj(addr);
1571 }
1572 
1573 jlong ShenandoahHeap::millis_since_last_gc() {
1574   return 0;
1575 }
1576 
1577 void ShenandoahHeap::prepare_for_verify() {
1578   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
1579     ensure_parsability(false);
1580   }
1581 }
1582 
1583 void ShenandoahHeap::print_gc_threads_on(outputStream* st) const {
1584   workers()->print_worker_threads_on(st);
1585 }
1586 
1587 void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
1588   workers()->threads_do(tcl);
1589 }
1590 
1591 void ShenandoahHeap::print_tracing_info() const {
1592   if (log_is_enabled(Info, gc, stats)) {
1593     ResourceMark rm;
1594     outputStream* out = Log(gc, stats)::info_stream();
1595     _shenandoah_policy->print_tracing_info(out);
1596   }
1597 }
1598 
1599 class ShenandoahVerifyRootsClosure: public ExtendedOopClosure {
1600 private:
1601   ShenandoahHeap*  _heap;
1602   VerifyOption     _vo;
1603   bool             _failures;
1604 public:
1605   // _vo == UsePrevMarking -> use "prev" marking information,
1606   // _vo == UseNextMarking -> use "next" marking information,
1607   // _vo == UseMarkWord    -> use mark word from object header.
1608   ShenandoahVerifyRootsClosure(VerifyOption vo) :
1609     _heap(ShenandoahHeap::heap()),
1610     _vo(vo),
1611     _failures(false) { }
1612 
1613   bool failures() { return _failures; }
1614 
1615 private:
1616   template <class T>
1617   inline void do_oop_work(T* p) {
    oop obj = oopDesc::load_decode_heap_oop(p);
    if (! oopDesc::is_null(obj) && ! obj->is_oop()) {
      // Just for debugging: report the broken root before the guarantee fires.
      tty->print_cr("Root location "PTR_FORMAT" points to bad oop "PTR_FORMAT,
                    p2i(p), p2i((void*) obj));
      // obj->print_on(tty);
      _failures = true;
    }
    guarantee(obj->is_oop_or_null(), "is oop or null");
1627   }
1628 
1629 public:
1630   void do_oop(oop* p)       {
1631     do_oop_work(p);
1632   }
1633 
1634   void do_oop(narrowOop* p) {
1635     do_oop_work(p);
1636   }
1637 
1638 };
1639 
1640 class ShenandoahVerifyHeapClosure: public ObjectClosure {
1641 private:
1642   ShenandoahVerifyRootsClosure _rootsCl;
1643 public:
1644   ShenandoahVerifyHeapClosure(ShenandoahVerifyRootsClosure rc) :
    _rootsCl(rc) {}
1646 
1647   void do_object(oop p) {
1648     _rootsCl.do_oop(&p);
1649   }
1650 };
1651 
1652 class ShenandoahVerifyKlassClosure: public KlassClosure {
1653   OopClosure *_oop_closure;
1654  public:
1655   ShenandoahVerifyKlassClosure(OopClosure* cl) : _oop_closure(cl) {}
1656   void do_klass(Klass* k) {
1657     k->oops_do(_oop_closure);
1658   }
1659 };
1660 
1661 void ShenandoahHeap::verify(VerifyOption vo) {
1662   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
1663 
1664     ShenandoahVerifyRootsClosure rootsCl(vo);
1665 
1666     assert(Thread::current()->is_VM_thread(),
1667            "Expected to be executed serially by the VM thread at this point");
1668 
1669     roots_iterate(&rootsCl);
1670 
1671     bool failures = rootsCl.failures();
1672     log_trace(gc)("verify failures: %s", BOOL_TO_STR(failures));
1673 
1674     ShenandoahVerifyHeapClosure heapCl(rootsCl);
1675 
1676     object_iterate(&heapCl);
1677     // TODO: Implement rest of it.
1678   } else {
1679     tty->print("(SKIPPING roots, heapRegions, remset) ");
1680   }
1681 }
1682 size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
1683   return _free_regions->capacity();
1684 }
1685 
1686 class ShenandoahIterateObjectClosureRegionClosure: public ShenandoahHeapRegionClosure {
1687   ObjectClosure* _cl;
1688 public:
1689   ShenandoahIterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {}
1690   bool doHeapRegion(ShenandoahHeapRegion* r) {
1691     ShenandoahHeap::heap()->marked_object_iterate(r, _cl);
1692     return false;
1693   }
1694 };
1695 
1696 void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
1697   ShenandoahIterateObjectClosureRegionClosure blk(cl);
1698   heap_region_iterate(&blk, false, true);
1699 }
1700 
1701 class ShenandoahSafeObjectIterateAdjustPtrsClosure : public MetadataAwareOopClosure {
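  // Rewrites each visited reference to point at its forwardee, so the wrapped
  // object iteration only ever exposes to-space pointers.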
1702 private:
1703   ShenandoahHeap* _heap;
1704 
1705 public:
1706   ShenandoahSafeObjectIterateAdjustPtrsClosure() : _heap(ShenandoahHeap::heap()) {}
1707 
1708 private:
1709   template <class T>
1710   inline void do_oop_work(T* p) {
1711     T o = oopDesc::load_heap_oop(p);
1712     if (!oopDesc::is_null(o)) {
1713       oop obj = oopDesc::decode_heap_oop_not_null(o);
1714       oopDesc::encode_store_heap_oop(p, BrooksPointer::forwardee(obj));
1715     }
1716   }
1717 public:
1718   void do_oop(oop* p) {
1719     do_oop_work(p);
1720   }
1721   void do_oop(narrowOop* p) {
1722     do_oop_work(p);
1723   }
1724 };
1725 
1726 class ShenandoahSafeObjectIterateAndUpdate : public ObjectClosure {
1727 private:
1728   ObjectClosure* _cl;
1729 public:
1730   ShenandoahSafeObjectIterateAndUpdate(ObjectClosure *cl) : _cl(cl) {}
1731 
1732   virtual void do_object(oop obj) {
1733     assert (oopDesc::unsafe_equals(obj, BrooksPointer::forwardee(obj)),
1734             "avoid double-counting: only non-forwarded objects here");
1735 
1736     // Fix up the ptrs.
1737     ShenandoahSafeObjectIterateAdjustPtrsClosure adjust_ptrs;
1738     obj->oop_iterate(&adjust_ptrs);
1739 
    // Now that its references are fixed up, pass the object to the wrapped closure:
1741     _cl->do_object(obj);
1742   }
1743 };
1744 
1745 void ShenandoahHeap::safe_object_iterate(ObjectClosure* cl) {
1746   assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1747 
1748   // Safe iteration does objects only with correct references.
1749   // This is why we skip dirty regions that have stale copies of objects,
1750   // and fix up the pointers in the returned objects.
1751 
1752   ShenandoahSafeObjectIterateAndUpdate safe_cl(cl);
1753   ShenandoahIterateObjectClosureRegionClosure blk(&safe_cl);
1754   heap_region_iterate(&blk,
1755                       /* skip_dirty_regions = */ true,
1756                       /* skip_humongous_continuations = */ true);
1757 
1758   _need_update_refs = false; // already updated the references
1759 }
1760 
// Apply blk->doHeapRegion() to all committed regions in address order,
// optionally skipping dirty (collection set) regions and humongous
// continuations, terminating the iteration early if doHeapRegion()
// returns true.
1763 void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk, bool skip_dirty_regions, bool skip_humongous_continuation) const {
1764   for (size_t i = 0; i < _num_regions; i++) {
1765     ShenandoahHeapRegion* current  = _ordered_regions->get(i);
1766     if (skip_humongous_continuation && current->is_humongous_continuation()) {
1767       continue;
1768     }
1769     if (skip_dirty_regions && in_collection_set(current)) {
1770       continue;
1771     }
1772     if (blk->doHeapRegion(current)) {
1773       return;
1774     }
1775   }
1776 }
1777 
1778 class ClearLivenessClosure : public ShenandoahHeapRegionClosure {
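  // Resets per-region liveness data and the next top-at-mark-start (TAMS)
  // before a new marking cycle.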
1779   ShenandoahHeap* sh;
1780 public:
1781   ClearLivenessClosure(ShenandoahHeap* heap) : sh(heap) { }
1782 
1783   bool doHeapRegion(ShenandoahHeapRegion* r) {
1784     r->clear_live_data();
1785     sh->set_next_top_at_mark_start(r->bottom(), r->top());
1786     return false;
1787   }
1788 };
1789 
void ShenandoahHeap::start_concurrent_marking() {
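  // Retire TLABs and accumulate their statistics, reset per-region liveness
  // and TAMS, clear the connection matrix if in use, then scan the roots to
  // seed the concurrent mark.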
1792   shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::accumulate_stats);
1793   accumulate_statistics_all_tlabs();
1794   shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::accumulate_stats);
1795 
1796   set_concurrent_mark_in_progress(true);
1797   // We need to reset all TLABs because we'd lose marks on all objects allocated in them.
1798   if (UseTLAB) {
1799     shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::make_parsable);
1800     ensure_parsability(true);
1801     shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::make_parsable);
1802   }
1803 
1804   _shenandoah_policy->record_bytes_allocated(_bytes_allocated_since_cm);
1805   _used_start_gc = used();
1806 
1807 #ifdef ASSERT
1808   if (ShenandoahDumpHeapBeforeConcurrentMark) {
1809     ensure_parsability(false);
1810     print_all_refs("pre-mark");
1811   }
1812 #endif
1813 
1814   shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::clear_liveness);
1815   ClearLivenessClosure clc(this);
1816   heap_region_iterate(&clc);
1817   shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::clear_liveness);
1818 
1819   if (UseShenandoahMatrix) {
1820     connection_matrix()->clear_all();
1821   }
1822   // print_all_refs("pre -mark");
1823 
1824   // oopDesc::_debug = true;
1825 
1826   // Make above changes visible to worker threads
1827   OrderAccess::fence();
1828 
1829   shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::scan_roots);
1830   concurrentMark()->init_mark_roots();
  shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::scan_roots);
}
1835 
class VerifyAfterEvacuationClosure : public ExtendedOopClosure {
  ShenandoahHeap* _sh;

public:
  VerifyAfterEvacuationClosure() : _sh(ShenandoahHeap::heap()) {}
1842 
1843   template<class T> void do_oop_nv(T* p) {
1844     T heap_oop = oopDesc::load_heap_oop(p);
1845     if (!oopDesc::is_null(heap_oop)) {
1846       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
1847       guarantee(_sh->in_collection_set(obj) == (! oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj))),
1848                 "forwarded objects can only exist in dirty (from-space) regions is_dirty: %s, is_forwarded: %s obj-klass: %s, marked: %s",
1849                 BOOL_TO_STR(_sh->in_collection_set(obj)),
1850                 BOOL_TO_STR(! oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj))),
1851                 obj->klass()->external_name(),
1852                 BOOL_TO_STR(_sh->is_marked_complete(obj))
1853                 );
1854       obj = oopDesc::bs()->read_barrier(obj);
1855       guarantee(! _sh->in_collection_set(obj), "forwarded oops must not point to dirty regions");
1856       guarantee(obj->is_oop(), "is_oop");
1857       guarantee(Metaspace::contains(obj->klass()), "klass pointer must go to metaspace");
1858     }
1859   }
1860 
1861   void do_oop(oop* p)       { do_oop_nv(p); }
1862   void do_oop(narrowOop* p) { do_oop_nv(p); }
1863 
1864 };
1865 
void ShenandoahHeap::verify_heap_after_evacuation() {
  verify_heap_size_consistency();
  ensure_parsability(false);

  VerifyAfterEvacuationClosure cl;
  roots_iterate(&cl);

  ObjectToOopClosure objs(&cl);
  object_iterate(&objs);
}
1879 
1880 class VerifyRegionsAfterUpdateRefsClosure : public ShenandoahHeapRegionClosure {
1881 public:
1882   bool doHeapRegion(ShenandoahHeapRegion* r) {
    assert(! ShenandoahHeap::heap()->in_collection_set(r), "no region may be in the collection set at this point");
1884     return false;
1885   }
1886 };
1887 
1888 void ShenandoahHeap::swap_mark_bitmaps() {
1889   // Swap bitmaps.
1890   CMBitMap* tmp1 = _complete_mark_bit_map;
1891   _complete_mark_bit_map = _next_mark_bit_map;
1892   _next_mark_bit_map = tmp1;
1893 
1894   // Swap top-at-mark-start pointers
1895   HeapWord** tmp2 = _complete_top_at_mark_starts;
1896   _complete_top_at_mark_starts = _next_top_at_mark_starts;
1897   _next_top_at_mark_starts = tmp2;
1898 
1899   HeapWord** tmp3 = _complete_top_at_mark_starts_base;
1900   _complete_top_at_mark_starts_base = _next_top_at_mark_starts_base;
1901   _next_top_at_mark_starts_base = tmp3;
1902 }
1903 
1904 class ShenandoahVerifyMatrixOopClosure : public ExtendedOopClosure {
1905 private:
1906   oop _obj;
1907 
1908   template <class T>
1909   inline void do_oop_nv(T* p) {
1910     T o = oopDesc::load_heap_oop(p);
1911     if (! oopDesc::is_null(o)) {
1912       oop obj = oopDesc::decode_heap_oop_not_null(o);
1913       ShenandoahHeap* heap = ShenandoahHeap::heap();
1914       guarantee(heap->is_marked_complete(obj), "must be marked");
1915 
1916       uint from_idx = heap->heap_region_index_containing(p);
1917       uint to_idx = heap->heap_region_index_containing(obj);
1918       if (!heap->connection_matrix()->is_connected(from_idx, to_idx)) {
1919         tty->print_cr("from-obj: ");
1920         _obj->print_on(tty);
1921         tty->print_cr("to-obj:");
1922         obj->print_on(tty);
1923         tty->print_cr("from-obj allocated after mark: %s", BOOL_TO_STR(heap->allocated_after_complete_mark_start((HeapWord*) _obj)));
1924         tty->print_cr("to-obj allocated after mark: %s", BOOL_TO_STR(heap->allocated_after_complete_mark_start((HeapWord*) obj)));
1925         tty->print_cr("from-obj marked: %s", BOOL_TO_STR(heap->is_marked_complete(_obj)));
1926         tty->print_cr("to-obj marked: %s", BOOL_TO_STR(heap->is_marked_complete(obj)));
1927         tty->print_cr("from-idx: %u, to-idx: %u", from_idx, to_idx);
1928 
1929         oop fwd_from = BrooksPointer::forwardee(_obj);
1930         oop fwd_to = BrooksPointer::forwardee(obj);
1931         tty->print_cr("from-obj forwardee: " PTR_FORMAT, p2i(fwd_from));
1932         tty->print_cr("to-obj forwardee: " PTR_FORMAT, p2i(fwd_to));
1933         tty->print_cr("forward(from-obj) marked: %s", BOOL_TO_STR(heap->is_marked_complete(fwd_from)));
1934         tty->print_cr("forward(to-obj) marked: %s", BOOL_TO_STR(heap->is_marked_complete(fwd_to)));
1935         uint fwd_from_idx = heap->heap_region_index_containing(fwd_from);
1936         uint fwd_to_idx = heap->heap_region_index_containing(fwd_to);
1937         tty->print_cr("forward(from-idx): %u, forward(to-idx): %u", fwd_from_idx, fwd_to_idx);
1938         tty->print_cr("forward(from) connected with forward(to)? %s", BOOL_TO_STR(heap->connection_matrix()->is_connected(fwd_from_idx, fwd_to_idx)));
1939         tty->print_cr("sizeof(bool): %lu", sizeof(bool));
1940       }
      guarantee(oopDesc::unsafe_equals(ShenandoahBarrierSet::resolve_oop_static_not_null(obj), obj), "object must not be forwarded");
1942       guarantee(heap->connection_matrix()->is_connected(from_idx, to_idx), "must be connected");
1943     }
1944   }
1945 
1946 public:
1947   ShenandoahVerifyMatrixOopClosure(oop obj) : _obj(obj) {}
1948 
1949   void do_oop(oop* o) {
1950     do_oop_nv(o);
1951   }
1952 
1953   void do_oop(narrowOop* o) {
1954     do_oop_nv(o);
1955   }
1956 };
1957 
1958 class ShenandoahVerifyMatrixObjectClosure : public ObjectClosure {
1959 public:
1960   void do_object(oop obj) {
1961     guarantee(ShenandoahHeap::heap()->is_marked_complete(obj), "must be marked");
1962     ShenandoahVerifyMatrixOopClosure cl(obj);
1963     obj->oop_iterate(&cl);
1964   }
1965 
1966 };
1967 
1968 class ShenandoahVerifyMatrixRegionClosure : public ShenandoahHeapRegionClosure {
1969   bool doHeapRegion(ShenandoahHeapRegion* r) {
1970     ShenandoahVerifyMatrixObjectClosure cl;
1971     ShenandoahHeap::heap()->marked_object_iterate(r, &cl);
1972     return false;
1973   }
1974 };
1975 
1976 void ShenandoahHeap::verify_matrix() {
1977   OrderAccess::fence();
1978   ensure_parsability(false);
1979   ShenandoahVerifyMatrixRegionClosure cl;
1980   heap_region_iterate(&cl, true, true);
1981 }
1982 
1983 void ShenandoahHeap::stop_concurrent_marking() {
1984   assert(concurrent_mark_in_progress(), "How else could we get here?");
  if (! cancelled_concgc()) {
    // Marking finished normally (it was not cancelled): no separate pass to
    // finish reference updates is needed, so clear the flag and publish the
    // completed marking by swapping the bitmaps.
    set_need_update_refs(false);
    swap_mark_bitmaps();
  }
1991   set_concurrent_mark_in_progress(false);
1992 
1993   if (log_is_enabled(Trace, gc, region)) {
1994     ResourceMark rm;
1995     outputStream* out = Log(gc, region)::trace_stream();
1996     print_heap_regions(out);
1997   }
1998 
1999 }
2000 
2001 void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
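  // Keep the SATB barrier in sync with marking: the queues are active exactly
  // while concurrent marking is in progress.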
2002   _concurrent_mark_in_progress = in_progress ? 1 : 0;
2003   JavaThread::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
2004 }
2005 
2006 void ShenandoahHeap::set_evacuation_in_progress_concurrently(bool in_progress) {
2007   // Note: it is important to first release the _evacuation_in_progress flag here,
2008   // so that Java threads can get out of oom_during_evacuation() and reach a safepoint,
2009   // in case a VM task is pending.
2010   set_evacuation_in_progress(in_progress);
2011   MutexLocker mu(Threads_lock);
2012   JavaThread::set_evacuation_in_progress_all_threads(in_progress);
2013 }
2014 
2015 void ShenandoahHeap::set_evacuation_in_progress_at_safepoint(bool in_progress) {
2016   assert(SafepointSynchronize::is_at_safepoint(), "Only call this at safepoint");
2017   set_evacuation_in_progress(in_progress);
2018   JavaThread::set_evacuation_in_progress_all_threads(in_progress);
2019 }
2020 
2021 void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
2022   _evacuation_in_progress = in_progress ? 1 : 0;
2023   OrderAccess::fence();
2024 }
2025 
void ShenandoahHeap::verify_copy(oop p, oop c) {
  assert(! oopDesc::unsafe_equals(p, oopDesc::bs()->read_barrier(p)), "original must be forwarded");
  assert(oopDesc::unsafe_equals(oopDesc::bs()->read_barrier(p), c), "verify pointer is correct");
  if (p->klass() != c->klass()) {
    print_heap_regions();
  }
  assert(p->klass() == c->klass(), "verify class p-size: "INT32_FORMAT" c-size: "INT32_FORMAT, p->size(), c->size());
  assert(p->size() == c->size(), "verify size");
  // Object may have been locked between copy and verification
  //    assert(p->mark() == c->mark(), "verify mark");
  assert(oopDesc::unsafe_equals(c, oopDesc::bs()->read_barrier(c)), "verify only forwarded once");
}
2038 
2039 void ShenandoahHeap::oom_during_evacuation() {
2040   log_develop_trace(gc)("Out of memory during evacuation, cancel evacuation, schedule full GC by thread %d",
2041                         Thread::current()->osthread()->thread_id());
2042 
2043   // We ran out of memory during evacuation. Cancel evacuation, and schedule a full-GC.
2044   collector_policy()->set_should_clear_all_soft_refs(true);
2045   concurrent_thread()->try_set_full_gc();
2046   cancel_concgc(_oom_evacuation);
2047 
2048   if ((! Thread::current()->is_GC_task_thread()) && (! Thread::current()->is_ConcurrentGC_thread())) {
2049     assert(! Threads_lock->owned_by_self()
2050            || SafepointSynchronize::is_at_safepoint(), "must not hold Threads_lock here");
2051     log_warning(gc)("OOM during evacuation. Let Java thread wait until evacuation finishes.");
2052     while (_evacuation_in_progress) { // wait.
2053       Thread::current()->_ParkEvent->park(1);
2054     }
2055   }
2056 
2057 }
2058 
2059 HeapWord* ShenandoahHeap::tlab_post_allocation_setup(HeapWord* obj) {
2060   // Initialize Brooks pointer for the next object
2061   HeapWord* result = obj + BrooksPointer::word_size();
2062   BrooksPointer::initialize(oop(result));
2063   return result;
2064 }
2065 
2066 uint ShenandoahHeap::oop_extra_words() {
2067   return BrooksPointer::word_size();
2068 }
2069 
2070 void ShenandoahHeap::grow_heap_by(size_t num_regions) {
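  // Commit storage for num_regions additional regions (see ensure_new_regions)
  // and append the fresh regions to the region and free sets.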
2071   size_t base = _num_regions;
2072   ensure_new_regions(num_regions);
2073   for (size_t i = 0; i < num_regions; i++) {
2074     size_t new_region_index = i + base;
2075     HeapWord* start = _first_region_bottom + (ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize) * new_region_index;
2076     ShenandoahHeapRegion* new_region = new ShenandoahHeapRegion(this, start, ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize, new_region_index);
2077 
2078     if (log_is_enabled(Trace, gc, region)) {
2079       ResourceMark rm;
2080       outputStream* out = Log(gc, region)::trace_stream();
2081       out->print_cr("allocating new region at index: "SIZE_FORMAT, new_region_index);
2082       new_region->print_on(out);
2083     }
2084 
2085     assert(_ordered_regions->active_regions() == new_region->region_number(), "must match");
2086     _ordered_regions->add_region(new_region);
2087     _in_cset_fast_test_base[new_region_index] = false; // Not in cset
2088     _next_top_at_mark_starts_base[new_region_index] = new_region->bottom();
2089     _complete_top_at_mark_starts_base[new_region_index] = new_region->bottom();
2090 
2091     _free_regions->add_region(new_region);
2092   }
2093 }
2094 
void ShenandoahHeap::ensure_new_regions(size_t new_regions) {
  size_t num_regions = _num_regions;
  size_t new_num_regions = num_regions + new_regions;
  assert(new_num_regions <= _max_regions, "we checked this earlier");

  size_t expand_size = new_regions * ShenandoahHeapRegion::RegionSizeBytes;
  log_trace(gc, region)("expanding storage by "SIZE_FORMAT_HEX" bytes, for "SIZE_FORMAT" new regions", expand_size, new_regions);
  bool success = _storage.expand_by(expand_size, ShenandoahAlwaysPreTouch);
  assert(success, "should always be able to expand by requested size");

  _num_regions = new_num_regions;
}
2109 
2110 ShenandoahForwardedIsAliveClosure::ShenandoahForwardedIsAliveClosure() :
2111   _heap(ShenandoahHeap::heap_no_check()) {
2112 }
2113 
2114 void ShenandoahForwardedIsAliveClosure::init(ShenandoahHeap* heap) {
2115   _heap = heap;
2116 }
2117 
bool ShenandoahForwardedIsAliveClosure::do_object_b(oop obj) {
  assert(_heap != NULL, "sanity");
2121   obj = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
2122 #ifdef ASSERT
2123   if (_heap->concurrent_mark_in_progress()) {
2124     assert(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)), "only query to-space");
2125   }
2126 #endif
2127   assert(!oopDesc::is_null(obj), "null");
2128   return _heap->is_marked_next(obj);
2129 }
2130 
void ShenandoahHeap::ref_processing_init() {
  MemRegion mr = reserved_region();

  isAlive.init(ShenandoahHeap::heap());
  assert(_max_workers > 0, "Sanity");

  _ref_processor =
    new ReferenceProcessor(mr,                      // span
                           ParallelRefProcEnabled,  // mt processing
                           _max_workers,            // degree of mt processing
                           true,                    // mt discovery
                           _max_workers,            // degree of mt discovery
                           false,                   // reference discovery is not atomic
                           &isAlive);
}
2151 
2152 #ifdef ASSERT
2153 void ShenandoahHeap::set_from_region_protection(bool protect) {
  for (size_t i = 0; i < _num_regions; i++) {
2155     ShenandoahHeapRegion* region = _ordered_regions->get(i);
2156     if (region != NULL && in_collection_set(region)) {
2157       if (protect) {
2158         region->memProtectionOn();
2159       } else {
2160         region->memProtectionOff();
2161       }
2162     }
2163   }
2164 }
2165 #endif
2166 
2167 size_t ShenandoahHeap::num_regions() {
2168   return _num_regions;
2169 }
2170 
2171 size_t ShenandoahHeap::max_regions() {
2172   return _max_regions;
2173 }
2174 
2175 GCTracer* ShenandoahHeap::tracer() {
2176   return shenandoahPolicy()->tracer();
2177 }
2178 
2179 size_t ShenandoahHeap::tlab_used(Thread* thread) const {
2180   return _free_regions->used();
2181 }
2182 
2183 void ShenandoahHeap::cancel_concgc(GCCause::Cause cause) {
2184   if (try_cancel_concgc()) {
2185     log_info(gc)("Cancelling concurrent GC: %s", GCCause::to_string(cause));
2186     _shenandoah_policy->report_concgc_cancelled();
2187   }
2188 }
2189 
2190 void ShenandoahHeap::cancel_concgc(ShenandoahCancelCause cause) {
2191   if (try_cancel_concgc()) {
2192     log_info(gc)("Cancelling concurrent GC: %s", cancel_cause_to_string(cause));
2193     _shenandoah_policy->report_concgc_cancelled();
2194   }
2195 }
2196 
2197 const char* ShenandoahHeap::cancel_cause_to_string(ShenandoahCancelCause cause) {
2198   switch (cause) {
2199     case _oom_evacuation:
2200       return "Out of memory for evacuation";
2201     case _vm_stop:
2202       return "Stopping VM";
2203     default:
2204       return "Unknown";
2205   }
2206 }
2207 
2208 void ShenandoahHeap::clear_cancelled_concgc() {
2209   set_cancelled_concgc(false);
2210 }
2211 
2212 uint ShenandoahHeap::max_workers() {
2213   return _max_workers;
2214 }
2215 
2216 void ShenandoahHeap::stop() {
2217   // The shutdown sequence should be able to terminate when GC is running.
2218 
2219   // Step 1. Notify control thread that we are in shutdown.
2220   // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown.
2221   // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
2222   _concurrent_gc_thread->prepare_for_graceful_shutdown();
2223 
2224   // Step 2. Notify GC workers that we are cancelling GC.
2225   cancel_concgc(_vm_stop);
2226 
2227   // Step 3. Wait until GC worker exits normally.
2228   _concurrent_gc_thread->stop();
2229 }
2230 
void ShenandoahHeap::unlink_string_and_symbol_table(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols) {
  StringSymbolTableUnlinkTask shenandoah_unlink_task(is_alive, process_strings, process_symbols);
2234   workers()->run_task(&shenandoah_unlink_task);
2235 
2236   //  if (G1StringDedup::is_enabled()) {
2237   //    G1StringDedup::unlink(is_alive);
2238   //  }
2239 }
2240 
2241 void ShenandoahHeap::set_need_update_refs(bool need_update_refs) {
2242   _need_update_refs = need_update_refs;
2243 }
2244 
// FIXME: this should live in ShenandoahHeapRegionSet.
2246 ShenandoahHeapRegion* ShenandoahHeap::next_compaction_region(const ShenandoahHeapRegion* r) {
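  // Full-GC sliding compaction never targets humongous regions; skip them.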
2247   size_t region_idx = r->region_number() + 1;
2248   ShenandoahHeapRegion* next = _ordered_regions->get(region_idx);
2249   guarantee(next->region_number() == region_idx, "region number must match");
2250   while (next->is_humongous()) {
2251     region_idx = next->region_number() + 1;
2252     next = _ordered_regions->get(region_idx);
2253     guarantee(next->region_number() == region_idx, "region number must match");
2254   }
2255   return next;
2256 }
2257 
2258 void ShenandoahHeap::set_region_in_collection_set(size_t region_index, bool b) {
2259   _in_cset_fast_test_base[region_index] = b;
2260 }
2261 
2262 ShenandoahMonitoringSupport* ShenandoahHeap::monitoring_support() {
2263   return _monitoring_support;
2264 }
2265 
2266 CMBitMap* ShenandoahHeap::complete_mark_bit_map() {
2267   return _complete_mark_bit_map;
2268 }
2269 
2270 CMBitMap* ShenandoahHeap::next_mark_bit_map() {
2271   return _next_mark_bit_map;
2272 }
2273 
2274 void ShenandoahHeap::add_free_region(ShenandoahHeapRegion* r) {
2275   _free_regions->add_region(r);
2276 }
2277 
2278 void ShenandoahHeap::clear_free_regions() {
2279   _free_regions->clear();
2280 }
2281 
2282 address ShenandoahHeap::in_cset_fast_test_addr() {
2283   return (address) (ShenandoahHeap::heap()->_in_cset_fast_test);
2284 }
2285 
2286 address ShenandoahHeap::cancelled_concgc_addr() {
2287   return (address) &(ShenandoahHeap::heap()->_cancelled_concgc);
2288 }
2289 
2290 void ShenandoahHeap::clear_cset_fast_test() {
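  // Reset the one-bool-per-region table behind the fast collection-set check.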
2291   assert(_in_cset_fast_test_base != NULL, "sanity");
2292   memset(_in_cset_fast_test_base, false,
2293          _in_cset_fast_test_length * sizeof(bool));
2294 }
2295 
2296 size_t ShenandoahHeap::conservative_max_heap_alignment() {
2297   return ShenandoahMaxRegionSize;
2298 }
2299 
2300 size_t ShenandoahHeap::bytes_allocated_since_cm() {
2301   return _bytes_allocated_since_cm;
2302 }
2303 
2304 void ShenandoahHeap::set_bytes_allocated_since_cm(size_t bytes) {
2305   _bytes_allocated_since_cm = bytes;
2306 }
2307 
2308 size_t ShenandoahHeap::max_allocated_gc() {
2309   return _max_allocated_gc;
2310 }
2311 
2312 void ShenandoahHeap::set_next_top_at_mark_start(HeapWord* region_base, HeapWord* addr) {
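  // The TAMS tables are indexed by region; the index is recovered from the
  // region base address via RegionSizeShift.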
2313   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::RegionSizeShift;
2314   _next_top_at_mark_starts[index] = addr;
2315 }
2316 
2317 HeapWord* ShenandoahHeap::next_top_at_mark_start(HeapWord* region_base) {
2318   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::RegionSizeShift;
2319   return _next_top_at_mark_starts[index];
2320 }
2321 
2322 void ShenandoahHeap::set_complete_top_at_mark_start(HeapWord* region_base, HeapWord* addr) {
2323   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::RegionSizeShift;
2324   _complete_top_at_mark_starts[index] = addr;
2325 }
2326 
2327 HeapWord* ShenandoahHeap::complete_top_at_mark_start(HeapWord* region_base) {
2328   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::RegionSizeShift;
2329   return _complete_top_at_mark_starts[index];
2330 }
2331 
2332 void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
2333   _full_gc_in_progress = in_progress;
2334 }
2335 
2336 bool ShenandoahHeap::is_full_gc_in_progress() const {
2337   return _full_gc_in_progress;
2338 }
2339 
2340 class NMethodOopInitializer : public OopClosure {
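  // Passes every oop embedded in an nmethod through the write barrier, so
  // compiled code never holds a from-space reference.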
2341 private:
2342   ShenandoahHeap* _heap;
2343 public:
2344   NMethodOopInitializer() : _heap(ShenandoahHeap::heap()) {
2345   }
2346 
2347 private:
2348   template <class T>
2349   inline void do_oop_work(T* p) {
2350     T o = oopDesc::load_heap_oop(p);
2351     if (! oopDesc::is_null(o)) {
2352       oop obj1 = oopDesc::decode_heap_oop_not_null(o);
2353       oop obj2 = oopDesc::bs()->write_barrier(obj1);
2354       if (! oopDesc::unsafe_equals(obj1, obj2)) {
2355         oopDesc::encode_store_heap_oop(p, obj2);
2356       }
2357     }
2358   }
2359 
2360 public:
2361   void do_oop(oop* o) {
2362     do_oop_work(o);
2363   }
2364   void do_oop(narrowOop* o) {
2365     do_oop_work(o);
2366   }
2367 };
2368 
2369 void ShenandoahHeap::register_nmethod(nmethod* nm) {
2370   NMethodOopInitializer init;
2371   nm->oops_do(&init);
2372   nm->fix_oop_relocations();
2373 }
2374 
2375 void ShenandoahHeap::unregister_nmethod(nmethod* nm) {
2376 }
2377 
2378 void ShenandoahHeap::pin_object(oop o) {
2379   heap_region_containing(o)->pin();
2380 }
2381 
2382 void ShenandoahHeap::unpin_object(oop o) {
2383   heap_region_containing(o)->unpin();
2384 }
2385 
2386 
2387 GCTimer* ShenandoahHeap::gc_timer() const {
2388   return _gc_timer;
2389 }
2390 
class RecordAllRefsOopClosure: public ExtendedOopClosure {
private:
  int _x;
  int* _matrix;
  size_t _num_regions;
  oop _p;

public:
  RecordAllRefsOopClosure(int* matrix, int x, size_t num_regions, oop p) :
    _x(x), _matrix(matrix), _num_regions(num_regions), _p(p) {}
2401 
2402   template <class T>
2403   void do_oop_work(T* p) {
2404     oop o = oopDesc::load_decode_heap_oop(p);
2405     if (o != NULL) {
2406       if (ShenandoahHeap::heap()->is_in(o) && o->is_oop() ) {
2407         int y = ShenandoahHeap::heap()->heap_region_containing(o)->region_number();
2408         _matrix[_x * _num_regions + y]++;
2409       }
2410     }
2411   }
2412   void do_oop(oop* p) {
2413     do_oop_work(p);
2414   }
2415 
2416   void do_oop(narrowOop* p) {
2417     do_oop_work(p);
2418   }
2419 
2420 };
2421 
2422 class RecordAllRefsObjectClosure : public ObjectClosure {
2423   int *_matrix;
2424   size_t _num_regions;
2425 
2426 public:
2427   RecordAllRefsObjectClosure(int *matrix, size_t num_regions) :
2428     _matrix(matrix), _num_regions(num_regions) {}
2429 
2430   void do_object(oop p) {
2431     if (ShenandoahHeap::heap()->is_in(p) && ShenandoahHeap::heap()->is_marked_next(p)  && p->is_oop()) {
2432       int x = ShenandoahHeap::heap()->heap_region_containing(p)->region_number();
2433       RecordAllRefsOopClosure cl(_matrix, x, _num_regions, p);
2434       p->oop_iterate(&cl);
2435     }
2436   }
2437 };
2438 void ShenandoahHeap::calculate_matrix(int* connections) {
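  // connections is a num x num row-major table: entry (i, j) counts references
  // from region i into region j; row 0 also accumulates references from roots.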
2439   log_develop_trace(gc)("calculating matrix");
2440   ensure_parsability(false);
  int num = (int) num_regions();
2442 
2443   for (int i = 0; i < num; i++) {
2444     for (int j = 0; j < num; j++) {
2445       connections[i * num + j] = 0;
2446     }
2447   }
2448 
2449   RecordAllRefsOopClosure cl(connections, 0, num, NULL);
2450   roots_iterate(&cl);
2451 
2452   RecordAllRefsObjectClosure cl2(connections, num);
  object_iterate(&cl2);
}
2456 
void ShenandoahHeap::print_matrix(int* connections) {
  int num = (int) num_regions();
  int cs_regions = 0;
  int referenced = 0;

  for (int i = 0; i < num; i++) {
    int numReferencedRegions = 0;
    int numReferencedByRegions = 0;

    for (int j = 0; j < num; j++) {
      if (connections[i * num + j] > 0)
        numReferencedRegions++;

      if (connections[j * num + i] > 0)
        numReferencedByRegions++;
    }

    // Count each region once, after its row and column have been scanned.
    cs_regions++;
    referenced += numReferencedByRegions;
2478 
2479     if (ShenandoahHeap::heap()->regions()->get(i)->has_live()) {
2480       tty->print("Region %d is referenced by %d regions {",
2481                  i, numReferencedByRegions);
2482       int col_count = 0;
2483       for (int j = 0; j < num; j++) {
2484         int foo = connections[j * num + i];
2485         if (foo > 0) {
2486           col_count++;
2487           if ((col_count % 10) == 0)
2488             tty->print("\n");
2489           tty->print("%d(%d), ", j,foo);
2490         }
2491       }
2492       tty->print("} \n");
2493     }
2494   }
2495 
  double avg = (double) referenced / (double) cs_regions;
  tty->print_cr("Average number of regions scanned per region = %f", avg);
2498 }
2499 
2500 class ShenandoahCountGarbageClosure : public ShenandoahHeapRegionClosure {
2501 private:
2502   size_t _garbage;
2503 public:
2504   ShenandoahCountGarbageClosure() : _garbage(0) {
2505   }
2506 
2507   bool doHeapRegion(ShenandoahHeapRegion* r) {
2508     if (! r->is_humongous() && ! r->is_pinned() && ! r->in_collection_set()) {
2509       _garbage += r->garbage();
2510     }
2511     return false;
2512   }
2513 
2514   size_t garbage() {
2515     return _garbage;
2516   }
2517 };
2518 
2519 size_t ShenandoahHeap::garbage() {
2520   ShenandoahCountGarbageClosure cl;
2521   heap_region_iterate(&cl);
2522   return cl.garbage();
2523 }
2524 
2525 ShenandoahConnectionMatrix* ShenandoahHeap::connection_matrix() {
2526   return _connection_matrix;
2527 }
2528 
2529 #ifdef ASSERT
2530 void ShenandoahHeap::assert_heaplock_owned_by_current_thread() {
2531   assert(_heap_lock == locked, "must be locked");
2532   assert(_heap_lock_owner == Thread::current(), "must be owned by current thread");
2533 }
2534 
2535 void ShenandoahHeap::assert_heaplock_or_safepoint() {
2536   Thread* thr = Thread::current();
2537   assert((_heap_lock == locked && _heap_lock_owner == thr) ||
2538          (SafepointSynchronize::is_at_safepoint() && thr->is_VM_thread()),
2539   "must own heap lock or by VM thread at safepoint");
2540 }
2541 
2542 #endif