/*
 * Copyright (c) 2013, 2015, Red Hat, Inc. and/or its affiliates.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.hpp"

#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/parallelCleaning.hpp"

#include "gc/shenandoah/brooksPointer.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc/shenandoah/shenandoahConcurrentThread.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahHumongous.hpp"
#include "gc/shenandoah/shenandoahMarkCompact.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahPartialGC.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.hpp"
#include "gc/shenandoah/vm_operations_shenandoah.hpp"

#include "runtime/vmThread.hpp"
#include "services/mallocTracker.hpp"
SCMUpdateRefsClosure::SCMUpdateRefsClosure() : _heap(ShenandoahHeap::heap()) {}

#ifdef ASSERT
template <class T>
void AssertToSpaceClosure::do_oop_nv(T* p) {
  T o = oopDesc::load_heap_oop(p);
  if (! oopDesc::is_null(o)) {
    oop obj = oopDesc::decode_heap_oop_not_null(o);
    assert(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)),
           "need to-space object here obj: " PTR_FORMAT ", rb(obj): " PTR_FORMAT ", p: " PTR_FORMAT,
           p2i(obj), p2i(ShenandoahBarrierSet::resolve_oop_static_not_null(obj)), p2i(p));
  }
}

void AssertToSpaceClosure::do_oop(narrowOop* p) { do_oop_nv(p); }
void AssertToSpaceClosure::do_oop(oop* p)       { do_oop_nv(p); }
#endif

const char* ShenandoahHeap::name() const {
  return "Shenandoah";
}

void ShenandoahHeap::print_heap_locations(HeapWord* start, HeapWord* end) {
  HeapWord* cur = NULL;
  for (cur = start; cur < end; cur++) {
    tty->print_cr(PTR_FORMAT " : " PTR_FORMAT, p2i(cur), p2i(*((HeapWord**) cur)));
  }
}

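// Pre-touch task: touches each heap region and the corresponding slices of
// both marking bitmaps from the same worker thread, which matters for
// NUMA-aware first-touch placement (see the note in ShenandoahHeap::initialize()).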
  81 
  82 class ShenandoahPretouchTask : public AbstractGangTask {
  83 private:
  84   ShenandoahHeapRegionSet* _regions;
  85   const size_t _bitmap_size;
  86   const size_t _page_size;
  87   char* _bitmap0_base;
  88   char* _bitmap1_base;
  89 public:
  90   ShenandoahPretouchTask(ShenandoahHeapRegionSet* regions,
  91                          char* bitmap0_base, char* bitmap1_base, size_t bitmap_size,
  92                          size_t page_size) :
  93     AbstractGangTask("Shenandoah PreTouch",
  94                      Universe::is_fully_initialized() ? GCId::current_raw() :
  95                                                         // During VM initialization there is
  96                                                         // no GC cycle that this task can be
  97                                                         // associated with.
  98                                                         GCId::undefined()),
  99     _bitmap0_base(bitmap0_base),
 100     _bitmap1_base(bitmap1_base),
 101     _regions(regions),
 102     _bitmap_size(bitmap_size),
 103     _page_size(page_size) {
 104     _regions->clear_current_index();
 105   };
 106 
 107   virtual void work(uint worker_id) {
 108     ShenandoahHeapRegion* r = _regions->claim_next();
 109     while (r != NULL) {
 110       log_trace(gc, heap)("Pretouch region " SIZE_FORMAT ": " PTR_FORMAT " -> " PTR_FORMAT,
 111                           r->region_number(), p2i(r->bottom()), p2i(r->end()));
 112       os::pretouch_memory(r->bottom(), r->end(), _page_size);
 113 
 114       size_t start = r->region_number()       * ShenandoahHeapRegion::region_size_bytes() / CMBitMap::heap_map_factor();
 115       size_t end   = (r->region_number() + 1) * ShenandoahHeapRegion::region_size_bytes() / CMBitMap::heap_map_factor();
 116       assert (end <= _bitmap_size, "end is sane: " SIZE_FORMAT " < " SIZE_FORMAT, end, _bitmap_size);
 117 
 118       log_trace(gc, heap)("Pretouch bitmap under region " SIZE_FORMAT ": " PTR_FORMAT " -> " PTR_FORMAT,
 119                           r->region_number(), p2i(_bitmap0_base + start), p2i(_bitmap0_base + end));
 120       os::pretouch_memory(_bitmap0_base + start, _bitmap0_base + end, _page_size);
 121 
 122       log_trace(gc, heap)("Pretouch bitmap under region " SIZE_FORMAT ": " PTR_FORMAT " -> " PTR_FORMAT,
 123                           r->region_number(), p2i(_bitmap1_base + start), p2i(_bitmap1_base + end));
 124       os::pretouch_memory(_bitmap1_base + start, _bitmap1_base + end, _page_size);
 125 
 126       r = _regions->claim_next();
 127     }
 128   }
 129 };
 130 
jint ShenandoahHeap::initialize() {
  CollectedHeap::pre_initialize();

  BrooksPointer::initial_checks();

  size_t init_byte_size = collector_policy()->initial_heap_byte_size();
  size_t max_byte_size = collector_policy()->max_heap_byte_size();

  Universe::check_alignment(max_byte_size,
                            ShenandoahHeapRegion::region_size_bytes(),
                            "shenandoah heap");
  Universe::check_alignment(init_byte_size,
                            ShenandoahHeapRegion::region_size_bytes(),
                            "shenandoah heap");

  ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
                                                 Arguments::conservative_max_heap_alignment());
  initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*) (heap_rs.base() + heap_rs.size()));

  set_barrier_set(new ShenandoahBarrierSet(this));
  ReservedSpace pgc_rs = heap_rs.first_part(max_byte_size);
  _storage.initialize(pgc_rs, init_byte_size);

  _num_regions = init_byte_size / ShenandoahHeapRegion::region_size_bytes();
  _max_regions = max_byte_size / ShenandoahHeapRegion::region_size_bytes();
  _initialSize = _num_regions * ShenandoahHeapRegion::region_size_bytes();
  size_t regionSizeWords = ShenandoahHeapRegion::region_size_bytes() / HeapWordSize;
  assert(init_byte_size == _initialSize, "tautology");
  _ordered_regions = new ShenandoahHeapRegionSet(_max_regions);
  _collection_set = new ShenandoahCollectionSet(_max_regions);
  _free_regions = new ShenandoahFreeSet(_max_regions);

  // Initialize fast collection set test structure.
  _in_cset_fast_test_length = _max_regions;
  _in_cset_fast_test_base =
                   NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length, mtGC);
  _in_cset_fast_test = _in_cset_fast_test_base -
               ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::region_size_shift());

  _next_top_at_mark_starts_base =
                   NEW_C_HEAP_ARRAY(HeapWord*, _max_regions, mtGC);
  _next_top_at_mark_starts = _next_top_at_mark_starts_base -
               ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::region_size_shift());

  _complete_top_at_mark_starts_base =
                   NEW_C_HEAP_ARRAY(HeapWord*, _max_regions, mtGC);
  _complete_top_at_mark_starts = _complete_top_at_mark_starts_base -
               ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::region_size_shift());

  size_t i = 0;
  for (i = 0; i < _num_regions; i++) {
    _in_cset_fast_test_base[i] = false; // Not in cset
    HeapWord* bottom = (HeapWord*) pgc_rs.base() + regionSizeWords * i;
    _complete_top_at_mark_starts_base[i] = bottom;
    _next_top_at_mark_starts_base[i] = bottom;
  }

  {
    ShenandoahHeapLock lock(this);
    for (i = 0; i < _num_regions; i++) {
      ShenandoahHeapRegion* current = new ShenandoahHeapRegion(this, (HeapWord*) pgc_rs.base() +
                                                               regionSizeWords * i, regionSizeWords, i);
      _free_regions->add_region(current);
      _ordered_regions->add_region(current);
    }
  }
  assert(((size_t) _ordered_regions->active_regions()) == _num_regions, "");
  assert((((size_t) base()) &
          (ShenandoahHeapRegion::region_size_bytes() - 1)) == 0,
         "misaligned heap: " PTR_FORMAT, p2i(base()));

  if (log_is_enabled(Trace, gc, region)) {
    ResourceMark rm;
    outputStream* out = Log(gc, region)::trace_stream();
    log_trace(gc, region)("All Regions");
    _ordered_regions->print(out);
    log_trace(gc, region)("Free Regions");
    _free_regions->print(out);
  }

  _recycled_regions = NEW_C_HEAP_ARRAY(size_t, _max_regions, mtGC);
  _recycled_region_count = 0;

  // The call below uses machinery (the SATB queues) that lives in G1, but
  // probably belongs in a shared location.
  JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
                                               SATB_Q_FL_lock,
                                               20 /*G1SATBProcessCompletedThreshold */,
                                               Shared_SATB_Q_lock);

  // Reserve space for prev and next bitmap.
  _bitmap_size = CMBitMap::compute_size(heap_rs.size());
  _heap_region = MemRegion((HeapWord*) heap_rs.base(), heap_rs.size() / HeapWordSize);

  size_t page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();

  ReservedSpace bitmap0(_bitmap_size, page_size);
  os::commit_memory_or_exit(bitmap0.base(), bitmap0.size(), false, "couldn't allocate mark bitmap");
  MemTracker::record_virtual_memory_type(bitmap0.base(), mtGC);
  MemRegion bitmap_region0 = MemRegion((HeapWord*) bitmap0.base(), bitmap0.size() / HeapWordSize);

  ReservedSpace bitmap1(_bitmap_size, page_size);
  os::commit_memory_or_exit(bitmap1.base(), bitmap1.size(), false, "couldn't allocate mark bitmap");
  MemTracker::record_virtual_memory_type(bitmap1.base(), mtGC);
  MemRegion bitmap_region1 = MemRegion((HeapWord*) bitmap1.base(), bitmap1.size() / HeapWordSize);

  if (ShenandoahVerify || (UseShenandoahMatrix && VerifyShenandoahMatrix)) {
    ReservedSpace verify_bitmap(_bitmap_size, page_size);
    os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), false,
                              "couldn't allocate verification bitmap");
    MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
    MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
    _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
  }

  if (ShenandoahAlwaysPreTouch) {
    assert (!AlwaysPreTouch, "Should have been overridden");

    // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
    // before initialize() below zeroes it from the initializing thread. For any given region,
    // we touch the region and the corresponding bitmaps from the same thread.

    log_info(gc, heap)("Parallel pretouch " SIZE_FORMAT " regions with " SIZE_FORMAT " byte pages",
                       _ordered_regions->count(), page_size);
    ShenandoahPretouchTask cl(_ordered_regions, bitmap0.base(), bitmap1.base(), _bitmap_size, page_size);
    _workers->run_task(&cl);
  }

  _mark_bit_map0.initialize(_heap_region, bitmap_region0);
  _complete_mark_bit_map = &_mark_bit_map0;

  _mark_bit_map1.initialize(_heap_region, bitmap_region1);
  _next_mark_bit_map = &_mark_bit_map1;

  _connection_matrix = new ShenandoahConnectionMatrix(_max_regions);
  _partial_gc = new ShenandoahPartialGC(this, _max_regions);

  _monitoring_support = new ShenandoahMonitoringSupport(this);

  _concurrent_gc_thread = new ShenandoahConcurrentThread();

  ShenandoahMarkCompact::initialize();

  return JNI_OK;
}
 276 
 277 ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
 278   CollectedHeap(),
 279   _shenandoah_policy(policy),
 280   _concurrent_mark_in_progress(0),
 281   _evacuation_in_progress(0),
 282   _full_gc_in_progress(false),
 283   _update_refs_in_progress(false),
 284   _free_regions(NULL),
 285   _collection_set(NULL),
 286   _bytes_allocated_since_cm(0),
 287   _bytes_allocated_during_cm(0),
 288   _allocated_last_gc(0),
 289   _used_start_gc(0),
 290   _max_workers(MAX2(ConcGCThreads, ParallelGCThreads)),
 291   _ref_processor(NULL),
 292   _in_cset_fast_test(NULL),
 293   _in_cset_fast_test_base(NULL),
 294   _next_top_at_mark_starts(NULL),
 295   _next_top_at_mark_starts_base(NULL),
 296   _complete_top_at_mark_starts(NULL),
 297   _complete_top_at_mark_starts_base(NULL),
 298   _mark_bit_map0(),
 299   _mark_bit_map1(),
 300   _connection_matrix(NULL),
 301   _cancelled_concgc(false),
 302   _need_update_refs(false),
 303   _need_reset_bitmaps(false),
 304   _heap_lock(0),
 305 #ifdef ASSERT
 306   _heap_lock_owner(NULL),
 307 #endif
 308   _gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer())
 309 
 310 {
 311   log_info(gc, init)("Parallel GC threads: "UINT32_FORMAT, ParallelGCThreads);
 312   log_info(gc, init)("Concurrent GC threads: "UINT32_FORMAT, ConcGCThreads);
 313   log_info(gc, init)("Parallel reference processing enabled: %s", BOOL_TO_STR(ParallelRefProcEnabled));
 314 
 315   _scm = new ShenandoahConcurrentMark();
 316   _used = 0;
 317 
 318   _max_workers = MAX2(_max_workers, 1U);
 319   _workers = new ShenandoahWorkGang("Shenandoah GC Threads", _max_workers,
 320                             /* are_GC_task_threads */true,
 321                             /* are_ConcurrentGC_threads */false);
 322   if (_workers == NULL) {
 323     vm_exit_during_initialization("Failed necessary allocation.");
 324   } else {
 325     _workers->initialize_workers();
 326   }
 327 }
 328 
class ResetNextBitmapTask : public AbstractGangTask {
private:
  ShenandoahHeapRegionSet* _regions;

public:
  ResetNextBitmapTask(ShenandoahHeapRegionSet* regions) :
    AbstractGangTask("Parallel Reset Bitmap Task"),
    _regions(regions) {
    _regions->clear_current_index();
  }

  void work(uint worker_id) {
    ShenandoahHeapRegion* region = _regions->claim_next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    while (region != NULL) {
      HeapWord* bottom = region->bottom();
      HeapWord* top = heap->next_top_at_mark_start(region->bottom());
      if (top > bottom) {
        heap->next_mark_bit_map()->clear_range_large(MemRegion(bottom, top));
      }
      region = _regions->claim_next();
    }
  }
};

void ShenandoahHeap::reset_next_mark_bitmap(WorkGang* workers) {
  ResetNextBitmapTask task = ResetNextBitmapTask(_ordered_regions);
  workers->run_task(&task);
}

class ResetCompleteBitmapTask : public AbstractGangTask {
private:
  ShenandoahHeapRegionSet* _regions;

public:
  ResetCompleteBitmapTask(ShenandoahHeapRegionSet* regions) :
    AbstractGangTask("Parallel Reset Bitmap Task"),
    _regions(regions) {
    _regions->clear_current_index();
  }

  void work(uint worker_id) {
    ShenandoahHeapRegion* region = _regions->claim_next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    while (region != NULL) {
      HeapWord* bottom = region->bottom();
      HeapWord* top = heap->complete_top_at_mark_start(region->bottom());
      if (top > bottom) {
        heap->complete_mark_bit_map()->clear_range_large(MemRegion(bottom, top));
      }
      region = _regions->claim_next();
    }
  }
};

void ShenandoahHeap::reset_complete_mark_bitmap(WorkGang* workers) {
  ResetCompleteBitmapTask task = ResetCompleteBitmapTask(_ordered_regions);
  workers->run_task(&task);
}

bool ShenandoahHeap::is_next_bitmap_clear() {
  HeapWord* start = _ordered_regions->bottom();
  HeapWord* end = _ordered_regions->end();
  return _next_mark_bit_map->getNextMarkedWordAddress(start, end) == end;
}

bool ShenandoahHeap::is_complete_bitmap_clear_range(HeapWord* start, HeapWord* end) {
  return _complete_mark_bit_map->getNextMarkedWordAddress(start, end) == end;
}

void ShenandoahHeap::print_on(outputStream* st) const {
  st->print("Shenandoah Heap");
  st->print(" total = " SIZE_FORMAT " K, used " SIZE_FORMAT " K ", capacity() / K, used() / K);
  st->print(" [" PTR_FORMAT ", " PTR_FORMAT ") ",
            p2i(reserved_region().start()),
            p2i(reserved_region().end()));
  st->print("Region size = " SIZE_FORMAT "K ", ShenandoahHeapRegion::region_size_bytes() / K);
  if (_concurrent_mark_in_progress) {
    st->print("marking ");
  }
  if (_evacuation_in_progress) {
    st->print("evacuating ");
  }
  if (cancelled_concgc()) {
    st->print("cancelled ");
  }
  st->print("\n");

  // Adapted from VirtualSpace::print_on(), which is non-PRODUCT only
  st->print("Virtual space:");
  if (_storage.special()) st->print(" (pinned in memory)");
  st->cr();
  st->print_cr(" - committed: " SIZE_FORMAT, _storage.committed_size());
  st->print_cr(" - reserved:  " SIZE_FORMAT, _storage.reserved_size());
  st->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  p2i(_storage.low()), p2i(_storage.high()));
  st->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  p2i(_storage.low_boundary()), p2i(_storage.high_boundary()));

  if (Verbose) {
    print_heap_regions(st);
  }
}

class InitGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    thread->gclab().initialize(true);
  }
};

void ShenandoahHeap::post_initialize() {
  if (UseTLAB) {
    // This is a very tricky point in VM lifetime. We cannot easily call Threads::threads_do
    // here, because some system threads (VMThread, WatcherThread, etc) are not yet available.
    // Their initialization should be handled separately. If we miss some threads here,
    // then any other TLAB-related activity would fail with asserts.

    InitGCLABClosure init_gclabs;
    {
      MutexLocker ml(Threads_lock);
      for (JavaThread *thread = Threads::first(); thread != NULL; thread = thread->next()) {
        init_gclabs.do_thread(thread);
      }
    }
    gc_threads_do(&init_gclabs);

    // GCLABs cannot be initialized early during VM startup, because their max_size cannot
    // be determined yet. Instead, let the WorkGang initialize the GCLAB when a new worker
    // is created.
    _workers->set_initialize_gclab();
  }

  _scm->initialize(_max_workers);

  ref_processing_init();
}

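// Sums up ShenandoahHeapRegion::used() over all regions; used to cross-check
// the heap-global used() counter in verify_heap_size_consistency().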
class CalculateUsedRegionClosure : public ShenandoahHeapRegionClosure {
  size_t sum;
public:
  CalculateUsedRegionClosure() : sum(0) {}

  bool doHeapRegion(ShenandoahHeapRegion* r) {
    sum = sum + r->used();
    return false;
  }

  size_t getResult() { return sum; }
};

size_t ShenandoahHeap::calculateUsed() {
  CalculateUsedRegionClosure cl;
  heap_region_iterate(&cl);
  return cl.getResult();
}

void ShenandoahHeap::verify_heap_size_consistency() {
  assert(calculateUsed() == used(),
         "heap used size must be consistent, heap-used: " SIZE_FORMAT ", regions-used: " SIZE_FORMAT, used(), calculateUsed());
}

size_t ShenandoahHeap::used() const {
  OrderAccess::acquire();
  return _used;
}

void ShenandoahHeap::increase_used(size_t bytes) {
  assert_heaplock_or_safepoint();
  _used += bytes;
}

void ShenandoahHeap::set_used(size_t bytes) {
  assert_heaplock_or_safepoint();
  _used = bytes;
}

void ShenandoahHeap::decrease_used(size_t bytes) {
  assert_heaplock_or_safepoint();
  assert(_used >= bytes, "never decrease used() below zero");
  _used -= bytes;
}

size_t ShenandoahHeap::capacity() const {
  return _num_regions * ShenandoahHeapRegion::region_size_bytes();
}

bool ShenandoahHeap::is_maximal_no_gc() const {
  Unimplemented();
  return true;
}

size_t ShenandoahHeap::max_capacity() const {
  return _max_regions * ShenandoahHeapRegion::region_size_bytes();
}

size_t ShenandoahHeap::min_capacity() const {
  return _initialSize;
}

VirtualSpace* ShenandoahHeap::storage() const {
  return (VirtualSpace*) &_storage;
}

bool ShenandoahHeap::is_in(const void* p) const {
  HeapWord* heap_base = (HeapWord*) base();
  HeapWord* last_region_end = heap_base + (ShenandoahHeapRegion::region_size_bytes() / HeapWordSize) * _num_regions;
  return p >= heap_base && p < last_region_end;
}

bool ShenandoahHeap::is_scavengable(const void* p) {
  return true;
}

HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
  // Retain the GCLAB and allocate the object in shared space if
  // the amount free in the GCLAB is too large to discard.
  if (thread->gclab().free() > thread->gclab().refill_waste_limit()) {
    thread->gclab().record_slow_allocation(size);
    return NULL;
  }

  // Discard gclab and allocate a new one.
  // To minimize fragmentation, the last GCLAB may be smaller than the rest.
  size_t new_gclab_size = thread->gclab().compute_size(size);

  thread->gclab().clear_before_allocation();

  if (new_gclab_size == 0) {
    return NULL;
  }

  // Allocate a new GCLAB...
  HeapWord* obj = allocate_new_gclab(new_gclab_size);
  if (obj == NULL) {
    return NULL;
  }

  if (ZeroTLAB) {
    // ..and clear it.
    Copy::zero_to_words(obj, new_gclab_size);
  } else {
    // ...and zap just allocated object.
#ifdef ASSERT
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(obj + hdr_size, new_gclab_size - hdr_size, badHeapWordVal);
#endif // ASSERT
  }
  thread->gclab().fill(obj, obj + size, new_gclab_size);
  return obj;
}

HeapWord* ShenandoahHeap::allocate_new_tlab(size_t word_size) {
#ifdef ASSERT
  log_debug(gc, alloc)("Allocate new tlab, requested size = " SIZE_FORMAT " bytes", word_size * HeapWordSize);
#endif
  return allocate_new_lab(word_size, _lab_thread);
}

HeapWord* ShenandoahHeap::allocate_new_gclab(size_t word_size) {
#ifdef ASSERT
  log_debug(gc, alloc)("Allocate new gclab, requested size = " SIZE_FORMAT " bytes", word_size * HeapWordSize);
#endif
  return allocate_new_lab(word_size, _lab_gc);
}

HeapWord* ShenandoahHeap::allocate_new_lab(size_t word_size, LabType type) {
  HeapWord* result = allocate_memory(word_size, type);

  if (result != NULL) {
    assert(! in_collection_set(result), "Never allocate in dirty region");
    _bytes_allocated_since_cm += word_size * HeapWordSize;

    log_develop_trace(gc, tlab)("allocating new tlab of size " SIZE_FORMAT " at addr " PTR_FORMAT, word_size, p2i(result));
  }
  return result;
}

ShenandoahHeap* ShenandoahHeap::heap() {
  CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to ShenandoahHeap::heap()");
  assert(heap->kind() == CollectedHeap::ShenandoahHeap, "not a shenandoah heap");
  return (ShenandoahHeap*) heap;
}

ShenandoahHeap* ShenandoahHeap::heap_no_check() {
  CollectedHeap* heap = Universe::heap();
  return (ShenandoahHeap*) heap;
}

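// Allocation slow path, taken under the heap lock: retry the allocation,
// growing the heap by just enough regions to cover the request, until either
// the allocation succeeds or the heap cannot grow any further.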
HeapWord* ShenandoahHeap::allocate_memory_work(size_t word_size, LabType type) {
  ShenandoahHeapLock heap_lock(this);

  HeapWord* result = allocate_memory_under_lock(word_size, type);
  size_t grow_by = (word_size * HeapWordSize + ShenandoahHeapRegion::region_size_bytes() - 1) / ShenandoahHeapRegion::region_size_bytes();

  while (result == NULL && _num_regions + grow_by <= _max_regions) {
    grow_heap_by(grow_by);
    result = allocate_memory_under_lock(word_size, type);
  }

  return result;
}

HeapWord* ShenandoahHeap::allocate_memory(size_t word_size, LabType type) {
  HeapWord* result = allocate_memory_work(word_size, type);

  if (type == _lab_thread) {
    // Allocation failed, try full-GC, then retry allocation.
    //
    // It might happen that one of the threads requesting allocation would unblock
    // way later after full-GC happened, only to fail the second allocation, because
    // other threads have already depleted the free storage. In this case, a better
    // strategy would be to try full-GC again.
    //
    // Lacking a way to detect progress from the "collect" call, we are left with blindly
    // retrying for some bounded number of times.
    // TODO: Poll if Full GC made enough progress to warrant retry.
    int tries = 0;
    while ((result == NULL) && (tries++ < ShenandoahFullGCTries)) {
      log_debug(gc)("[" PTR_FORMAT "] Failed to allocate " SIZE_FORMAT " bytes, doing full GC, try %d",
                    p2i(Thread::current()), word_size * HeapWordSize, tries);
      collect(GCCause::_allocation_failure);
      result = allocate_memory_work(word_size, type);
    }
  }

  // Only update monitoring counters when not calling from a write-barrier.
  // Otherwise we might attempt to grab the Service_lock, which we must
  // not do when coming from a write-barrier (because the thread might
  // already hold the Compile_lock).
  if (type == _lab_thread) {
    monitoring_support()->update_counters();
  }

  log_develop_trace(gc, alloc)("allocate memory chunk of size " SIZE_FORMAT " at addr " PTR_FORMAT " by thread %d ",
                               word_size, p2i(result), Thread::current()->osthread()->thread_id());

  return result;
}

HeapWord* ShenandoahHeap::allocate_memory_under_lock(size_t word_size, LabType type) {
  assert_heaplock_owned_by_current_thread();

  if (word_size * HeapWordSize > ShenandoahHeapRegion::region_size_bytes()) {
    return allocate_large_memory(word_size);
  }

  // Not enough memory in free region set.
  // Coming out of full GC, it is possible that there is no free region
  // available, so current_index may not be valid.
  if (word_size * HeapWordSize > _free_regions->capacity()) return NULL;

  ShenandoahHeapRegion* my_current_region = _free_regions->current_no_humongous();

  if (my_current_region == NULL) {
    return NULL; // No more room to make a new region. OOM.
  }
  assert(my_current_region != NULL, "should have a region at this point");
  assert(! in_collection_set(my_current_region), "never get targeted regions in free-lists");
  assert(! my_current_region->is_humongous(), "never attempt to allocate from humongous object regions");

  HeapWord* result = my_current_region->allocate_lab(word_size, type);

  while (result == NULL) {
    // 2nd attempt. Try next region.
#ifdef ASSERT
    if (my_current_region->free() > 0) {
      log_debug(gc, alloc)("Retire region with " SIZE_FORMAT " bytes free", my_current_region->free());
    }
#endif
    _free_regions->increase_used(my_current_region->free());
    ShenandoahHeapRegion* next_region = _free_regions->next_no_humongous();
    assert(next_region != my_current_region, "must not get current again");
    my_current_region = next_region;

    if (my_current_region == NULL) {
      return NULL; // No more room to make a new region. OOM.
    }
    assert(my_current_region != NULL, "should have a region at this point");
    assert(! in_collection_set(my_current_region), "never get targeted regions in free-lists");
    assert(! my_current_region->is_humongous(), "never attempt to allocate from humongous object regions");
    result = my_current_region->allocate_lab(word_size, type);
  }

  my_current_region->increase_live_data_words(word_size);
  increase_used(word_size * HeapWordSize);
  _free_regions->increase_used(word_size * HeapWordSize);
  return result;
}

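// Humongous allocations (larger than one region) bypass the regular free-list
// path above and claim a contiguous run of regions instead.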
HeapWord* ShenandoahHeap::allocate_large_memory(size_t words) {
  assert_heaplock_owned_by_current_thread();

  size_t required_regions = ShenandoahHumongous::required_regions(words * HeapWordSize);
  if (required_regions > _max_regions) return NULL;

  ShenandoahHeapRegion* r = _free_regions->allocate_contiguous(required_regions);

  HeapWord* result = NULL;

  if (r != NULL)  {
    result = r->bottom();

    log_debug(gc, humongous)("allocating humongous object of size: " SIZE_FORMAT " KB at location " PTR_FORMAT " in start region " SIZE_FORMAT,
                             (words * HeapWordSize) / K, p2i(result), r->region_number());
  } else {
    log_debug(gc, humongous)("allocating humongous object of size: " SIZE_FORMAT " KB failed",
                             (words * HeapWordSize) / K);
  }

  return result;
}

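// Every object is preceded by a Brooks forwarding pointer word, so the actual
// allocation is one BrooksPointer::word_size() larger than requested, and the
// object itself starts right after the filler word.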
HeapWord* ShenandoahHeap::mem_allocate(size_t size,
                                       bool*  gc_overhead_limit_was_exceeded) {
  HeapWord* filler = allocate_memory(size + BrooksPointer::word_size(), _lab_thread);
  if (filler != NULL) {
    // Compute the object start only on success; pointer arithmetic on a
    // NULL filler would be undefined behavior.
    HeapWord* result = filler + BrooksPointer::word_size();
    BrooksPointer::initialize(oop(result));
    _bytes_allocated_since_cm += size * HeapWordSize;

    assert(! in_collection_set(result), "never allocate in targeted region");
    return result;
  } else {
    /*
    tty->print_cr("Out of memory. Requested number of words: " SIZE_FORMAT " used heap: " INT64_FORMAT ", bytes allocated since last CM: " INT64_FORMAT,
                  size, used(), _bytes_allocated_since_cm);
    {
      print_heap_regions();
      tty->print("Printing " SIZE_FORMAT " free regions:\n", _free_regions->count());
      _free_regions->print();
    }
    */
    return NULL;
  }
}

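// Evacuates every live object of a from-region that has not been copied yet:
// only objects still equal to their forwardee are candidates for evacuation.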
class ParallelEvacuateRegionObjectClosure : public ObjectClosure {
private:
  ShenandoahHeap* _heap;
  Thread* _thread;
public:
  ParallelEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
    _heap(heap), _thread(Thread::current()) {
  }

  void do_object(oop p) {
    log_develop_trace(gc, compaction)("Calling ParallelEvacuateRegionObjectClosure on " PTR_FORMAT " of size %d",
                                      p2i((HeapWord*) p), p->size());

    assert(_heap->is_marked_complete(p), "expect only marked objects");
    if (oopDesc::unsafe_equals(p, ShenandoahBarrierSet::resolve_oop_static_not_null(p))) {
      bool evac;
      _heap->evacuate_object(p, _thread, evac);
    }
  }
};

#ifdef ASSERT
class VerifyEvacuatedObjectClosure : public ObjectClosure {
public:
  void do_object(oop p) {
    if (ShenandoahHeap::heap()->is_marked_complete(p)) {
      oop p_prime = oopDesc::bs()->read_barrier(p);
      assert(! oopDesc::unsafe_equals(p, p_prime), "Should point to evacuated copy");
      if (p->klass() != p_prime->klass()) {
        tty->print_cr("copy has different class than original:");
        p->klass()->print_on(tty);
        p_prime->klass()->print_on(tty);
      }
      assert(p->klass() == p_prime->klass(), "Should have the same class p: " PTR_FORMAT ", p_prime: " PTR_FORMAT, p2i(p), p2i(p_prime));
      //      assert(p->mark() == p_prime->mark(), "Should have the same mark");
      assert(p->size() == p_prime->size(), "Should be the same size");
      assert(oopDesc::unsafe_equals(p_prime, oopDesc::bs()->read_barrier(p_prime)), "One forward once");
    }
  }
};

void ShenandoahHeap::verify_evacuated_region(ShenandoahHeapRegion* from_region) {
  VerifyEvacuatedObjectClosure verify_evacuation;
  marked_object_iterate(from_region, &verify_evacuation);
}
#endif

void ShenandoahHeap::parallel_evacuate_region(ShenandoahHeapRegion* from_region) {
  assert(from_region->has_live(), "all-garbage regions are reclaimed earlier");

  ParallelEvacuateRegionObjectClosure evacuate_region(this);

  marked_object_iterate(from_region, &evacuate_region);

#ifdef ASSERT
  if (ShenandoahVerify && ! cancelled_concgc()) {
    verify_evacuated_region(from_region);
  }
#endif
}

class ParallelEvacuationTask : public AbstractGangTask {
private:
  ShenandoahHeap* _sh;
  ShenandoahCollectionSet* _cs;

public:
  ParallelEvacuationTask(ShenandoahHeap* sh,
                         ShenandoahCollectionSet* cs) :
    AbstractGangTask("Parallel Evacuation Task"),
    _sh(sh),
    _cs(cs) {}

  void work(uint worker_id) {
    ShenandoahHeapRegion* from_hr = _cs->claim_next();

    while (from_hr != NULL) {
      log_develop_trace(gc, region)("Thread " INT32_FORMAT " claimed Heap Region " SIZE_FORMAT,
                                    worker_id,
                                    from_hr->region_number());

      assert(from_hr->has_live(), "all-garbage regions are reclaimed early");
      _sh->parallel_evacuate_region(from_hr);

      if (_sh->cancelled_concgc()) {
        log_develop_trace(gc, region)("Cancelled concgc while evacuating region " SIZE_FORMAT, from_hr->region_number());
        break;
      }
      from_hr = _cs->claim_next();
    }
  }
};

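// Walks all regions under the heap lock and defer-recycles those in the
// collection set; the fast in-cset test is only reset if the cycle has not
// been cancelled.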
void ShenandoahHeap::recycle_dirty_regions() {
  ShenandoahHeapLock lock(this);

  size_t bytes_reclaimed = 0;

  ShenandoahHeapRegionSet* set = regions();
  set->clear_current_index();

  start_deferred_recycling();

  ShenandoahHeapRegion* r = set->claim_next();
  while (r != NULL) {
    if (in_collection_set(r)) {
      decrease_used(r->used());
      bytes_reclaimed += r->used();
      defer_recycle(r);
    }
    r = set->claim_next();
  }

  finish_deferred_recycle();

  _shenandoah_policy->record_bytes_reclaimed(bytes_reclaimed);
  if (! cancelled_concgc()) {
    clear_cset_fast_test();
  }
}

ShenandoahFreeSet* ShenandoahHeap::free_regions() {
  return _free_regions;
}

void ShenandoahHeap::print_heap_regions(outputStream* st) const {
  _ordered_regions->print(st);
}

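// Debugging helpers: the closures below dump every reference in the heap,
// starting from the roots, together with mark and klass information.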
class PrintAllRefsOopClosure: public ExtendedOopClosure {
private:
  int _index;
  const char* _prefix;

public:
  PrintAllRefsOopClosure(const char* prefix) : _index(0), _prefix(prefix) {}

private:
  template <class T>
  inline void do_oop_work(T* p) {
    oop o = oopDesc::load_decode_heap_oop(p);
    if (o != NULL) {
      if (ShenandoahHeap::heap()->is_in(o) && o->is_oop()) {
        tty->print_cr("%s " INT32_FORMAT " (" PTR_FORMAT ") -> " PTR_FORMAT " (marked: %s) (%s " PTR_FORMAT ")",
                      _prefix, _index,
                      p2i(p), p2i(o),
                      BOOL_TO_STR(ShenandoahHeap::heap()->is_marked_complete(o)),
                      o->klass()->internal_name(), p2i(o->klass()));
      } else {
        tty->print_cr("%s " INT32_FORMAT " (" PTR_FORMAT ") dirty -> " PTR_FORMAT " (not in heap, possibly corrupted or dirty)",
                      _prefix, _index,
                      p2i(p), p2i(o));
      }
    } else {
      tty->print_cr("%s " INT32_FORMAT " (" PTR_FORMAT ") -> " PTR_FORMAT, _prefix, _index, p2i(p), p2i((HeapWord*) o));
    }
    _index++;
  }

public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }

  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

class PrintAllRefsObjectClosure : public ObjectClosure {
  const char* _prefix;

public:
  PrintAllRefsObjectClosure(const char* prefix) : _prefix(prefix) {}

  void do_object(oop p) {
    if (ShenandoahHeap::heap()->is_in(p)) {
      tty->print_cr("%s object " PTR_FORMAT " (marked: %s) (%s " PTR_FORMAT ") refers to:",
                    _prefix, p2i(p),
                    BOOL_TO_STR(ShenandoahHeap::heap()->is_marked_complete(p)),
                    p->klass()->internal_name(), p2i(p->klass()));
      PrintAllRefsOopClosure cl(_prefix);
      p->oop_iterate(&cl);
    }
  }
};

void ShenandoahHeap::print_all_refs(const char* prefix) {
  tty->print_cr("printing all references in the heap");
  tty->print_cr("root references:");

  ensure_parsability(false);

  PrintAllRefsOopClosure cl(prefix);
  roots_iterate(&cl);

  tty->print_cr("heap references:");
  PrintAllRefsObjectClosure cl2(prefix);
  object_iterate(&cl2);
}

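// Post-marking verifier: every reference reachable from a marked object must
// itself be marked, must not be forwarded, and must not point into the
// collection set.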
class VerifyAfterMarkingOopClosure: public ExtendedOopClosure {
private:
  ShenandoahHeap*  _heap;

public:
  VerifyAfterMarkingOopClosure() :
    _heap(ShenandoahHeap::heap()) { }

private:
  template <class T>
  inline void do_oop_work(T* p) {
    oop o = oopDesc::load_decode_heap_oop(p);
    if (o != NULL) {
      if (! _heap->is_marked_complete(o)) {
        _heap->print_heap_regions();
        _heap->print_all_refs("post-mark");
        tty->print_cr("oop not marked, although referrer is marked: " PTR_FORMAT ": in_heap: %s, is_marked: %s",
                      p2i((HeapWord*) o), BOOL_TO_STR(_heap->is_in(o)), BOOL_TO_STR(_heap->is_marked_complete(o)));
        _heap->print_heap_locations((HeapWord*) o, (HeapWord*) o + o->size());

        tty->print_cr("oop class: %s", o->klass()->internal_name());
        if (_heap->is_in(p)) {
          oop referrer = oop(_heap->heap_region_containing(p)->block_start_const(p));
          tty->print_cr("Referrer starts at addr " PTR_FORMAT, p2i((HeapWord*) referrer));
          referrer->print();
          _heap->print_heap_locations((HeapWord*) referrer, (HeapWord*) referrer + referrer->size());
        }
        tty->print_cr("heap region containing object:");
        _heap->heap_region_containing(o)->print();
        tty->print_cr("heap region containing referrer:");
        _heap->heap_region_containing(p)->print();
        tty->print_cr("heap region containing forwardee:");
        _heap->heap_region_containing(oopDesc::bs()->read_barrier(o))->print();
      }
      assert(o->is_oop(), "oop must be an oop");
      assert(Metaspace::contains(o->klass()), "klass pointer must go to metaspace");
      if (! oopDesc::unsafe_equals(o, oopDesc::bs()->read_barrier(o))) {
        tty->print_cr("oop has a forwardee: p: " PTR_FORMAT " (%s), o = " PTR_FORMAT " (%s), new-o: " PTR_FORMAT " (%s)",
                      p2i(p),
                      BOOL_TO_STR(_heap->in_collection_set(p)),
                      p2i(o),
                      BOOL_TO_STR(_heap->in_collection_set(o)),
                      p2i((HeapWord*) oopDesc::bs()->read_barrier(o)),
                      BOOL_TO_STR(_heap->in_collection_set(oopDesc::bs()->read_barrier(o))));
        tty->print_cr("oop class: %s", o->klass()->internal_name());
      }
      assert(oopDesc::unsafe_equals(o, oopDesc::bs()->read_barrier(o)), "oops must not be forwarded");
      assert(! _heap->in_collection_set(o), "references must not point to dirty heap regions");
      assert(_heap->is_marked_complete(o), "live oops must be marked current");
    }
  }

public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }

  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

void ShenandoahHeap::verify_heap_after_marking() {
  verify_heap_size_consistency();

  log_trace(gc)("verifying heap after marking");

  VerifyAfterMarkingOopClosure cl;
  roots_iterate(&cl);
  ObjectToOopClosure objs(&cl);
  object_iterate(&objs);
}

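// Reclaims the entire run of regions backing a dead humongous object;
// callers must pass the humongous start region.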
void ShenandoahHeap::reclaim_humongous_region_at(ShenandoahHeapRegion* r) {
  assert(r->is_humongous_start(), "reclaim regions starting with the first one");

  oop humongous_obj = oop(r->bottom() + BrooksPointer::word_size());
  size_t size = humongous_obj->size() + BrooksPointer::word_size();
  size_t required_regions = ShenandoahHumongous::required_regions(size * HeapWordSize);
  size_t index = r->region_number();

  assert(!r->has_live(), "liveness must be zero");

  for (size_t i = 0; i < required_regions; i++) {
    ShenandoahHeapRegion* region = _ordered_regions->get(index++);

    assert((region->is_humongous_start() || region->is_humongous_continuation()),
           "expect correct humongous start or continuation");

    if (log_is_enabled(Debug, gc, humongous)) {
      log_debug(gc, humongous)("reclaiming " SIZE_FORMAT " humongous regions for object of size: " SIZE_FORMAT " words", required_regions, size);
      ResourceMark rm;
      outputStream* out = Log(gc, humongous)::debug_stream();
      region->print_on(out);
    }

    region->recycle();
    ShenandoahHeap::heap()->decrease_used(ShenandoahHeapRegion::region_size_bytes());
  }
}

class ShenandoahReclaimHumongousRegionsClosure : public ShenandoahHeapRegionClosure {

  bool doHeapRegion(ShenandoahHeapRegion* r) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();

    if (r->is_humongous_start()) {
      oop humongous_obj = oop(r->bottom() + BrooksPointer::word_size());
      if (! heap->is_marked_complete(humongous_obj)) {
        heap->reclaim_humongous_region_at(r);
      }
    }
    return false;
  }
};

#ifdef ASSERT
class CheckCollectionSetClosure: public ShenandoahHeapRegionClosure {
  bool doHeapRegion(ShenandoahHeapRegion* r) {
    assert(! ShenandoahHeap::heap()->in_collection_set(r), "Should have been cleared by now");
    return false;
  }
};
#endif

void ShenandoahHeap::prepare_for_concurrent_evacuation() {
  assert(_ordered_regions->get(0)->region_number() == 0, "FIXME CHF. FIXME CHF!");

  log_develop_trace(gc)("Thread %d started prepare_for_concurrent_evacuation", Thread::current()->osthread()->thread_id());

  if (!cancelled_concgc()) {
    // Allocations might have happened before we STWed here, record peak:
    shenandoahPolicy()->record_peak_occupancy();

    recycle_dirty_regions();

    ensure_parsability(true);

    if (UseShenandoahMatrix && PrintShenandoahMatrix) {
      outputStream* log = Log(gc)::info_stream();
      connection_matrix()->print_on(log);
    }

    if (ShenandoahVerify || (UseShenandoahMatrix && VerifyShenandoahMatrix)) {
      verify_heap_reachable_at_safepoint();
    }

#ifdef ASSERT
    if (ShenandoahVerify) {
      verify_heap_after_marking();
    }
#endif

    // NOTE: This needs to be done during a stop the world pause, because
    // putting regions into the collection set concurrently with Java threads
    // will create a race. In particular, acmp could fail because when we
    // resolve the first operand, the containing region might not yet be in
    // the collection set, and thus return the original oop. When the 2nd
    // operand gets resolved, the region could be in the collection set
    // and the oop gets evacuated. If both operands have originally been
    // the same, we get false negatives.

    {
      ShenandoahHeapLock lock(this);
      _collection_set->clear();
      _free_regions->clear();

      ShenandoahReclaimHumongousRegionsClosure reclaim;
      heap_region_iterate(&reclaim);

#ifdef ASSERT
      CheckCollectionSetClosure ccsc;
      _ordered_regions->heap_region_iterate(&ccsc);
#endif

      _shenandoah_policy->choose_collection_set(_collection_set);

      _shenandoah_policy->choose_free_set(_free_regions);
    }

    _bytes_allocated_since_cm = 0;

    Universe::update_heap_info_at_gc();
  }
}

class RetireTLABClosure : public ThreadClosure {
private:
  bool _retire;

public:
  RetireTLABClosure(bool retire) : _retire(retire) {
  }

  void do_thread(Thread* thread) {
    thread->gclab().make_parsable(_retire);
  }
};

void ShenandoahHeap::ensure_parsability(bool retire_tlabs) {
  if (UseTLAB) {
    CollectedHeap::ensure_parsability(retire_tlabs);
    RetireTLABClosure cl(retire_tlabs);
    Threads::threads_do(&cl);
  }
}

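// Root evacuation closure: if a root points into the collection set and the
// object has not been copied yet, evacuate it, then update the root slot to
// point to the to-space copy.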
class ShenandoahEvacuateUpdateRootsClosure: public ExtendedOopClosure {
private:
  ShenandoahHeap* _heap;
  Thread* _thread;
public:
  ShenandoahEvacuateUpdateRootsClosure() :
    _heap(ShenandoahHeap::heap()), _thread(Thread::current()) {
  }

private:
  template <class T>
  void do_oop_work(T* p) {
    assert(_heap->is_evacuation_in_progress(), "Only do this when evacuation is in progress");

    T o = oopDesc::load_heap_oop(p);
    if (! oopDesc::is_null(o)) {
      oop obj = oopDesc::decode_heap_oop_not_null(o);
      if (_heap->in_collection_set(obj)) {
        assert(_heap->is_marked_complete(obj), "only evacuate marked objects %d %d",
               _heap->is_marked_complete(obj), _heap->is_marked_complete(ShenandoahBarrierSet::resolve_oop_static_not_null(obj)));
        oop resolved = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
        if (oopDesc::unsafe_equals(resolved, obj)) {
          bool evac;
          resolved = _heap->evacuate_object(obj, _thread, evac);
        }
        oopDesc::encode_store_heap_oop(p, resolved);
      }
    }
  }

public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }
  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

class ShenandoahEvacuateUpdateRootsTask : public AbstractGangTask {
  ShenandoahRootEvacuator* _rp;
public:

  ShenandoahEvacuateUpdateRootsTask(ShenandoahRootEvacuator* rp) :
    AbstractGangTask("Shenandoah evacuate and update roots"),
    _rp(rp)
  {
    // Nothing else to do.
  }

  void work(uint worker_id) {
    ShenandoahEvacuateUpdateRootsClosure cl;
    MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);

    _rp->process_evacuate_roots(&cl, &blobsCl, worker_id);
  }
};

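// Used on the cancelled-evacuation path: only fixes up root slots to point
// to existing to-space copies, without evacuating anything.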
class ShenandoahFixRootsTask : public AbstractGangTask {
  ShenandoahRootEvacuator* _rp;
public:

  ShenandoahFixRootsTask(ShenandoahRootEvacuator* rp) :
    AbstractGangTask("Shenandoah update roots"),
    _rp(rp)
  {
    // Nothing else to do.
  }

  void work(uint worker_id) {
    SCMUpdateRefsClosure cl;
    MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);

    _rp->process_evacuate_roots(&cl, &blobsCl, worker_id);
  }
};

void ShenandoahHeap::evacuate_and_update_roots() {
  COMPILER2_PRESENT(DerivedPointerTable::clear());

  assert(SafepointSynchronize::is_at_safepoint(), "Only iterate roots while world is stopped");

  {
    ShenandoahRootEvacuator rp(this, workers()->active_workers(), ShenandoahCollectorPolicy::init_evac);
    ShenandoahEvacuateUpdateRootsTask roots_task(&rp);
    workers()->run_task(&roots_task);
  }

  COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

  if (cancelled_concgc()) {
    // If initial evacuation has been cancelled, we need to update all references
    // after all workers have finished. Otherwise we might run into the following problem:
    // GC thread 1 cannot allocate anymore, thus evacuation fails and leaves a from-space
    // ptr of object X in some root oop*; GC thread 2 evacuates the same object X to
    // to-space, which leaves a truly dangling from-space reference in the first root oop*.
    // This must not happen.
    // clear() and update_pointers() must always be called in pairs,
    // cannot nest with above clear()/update_pointers().
    COMPILER2_PRESENT(DerivedPointerTable::clear());
    ShenandoahRootEvacuator rp(this, workers()->active_workers(), ShenandoahCollectorPolicy::init_evac);
    ShenandoahFixRootsTask update_roots_task(&rp);
    workers()->run_task(&update_roots_task);
    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
  }

#ifdef ASSERT
  {
    AssertToSpaceClosure cl;
    CodeBlobToOopClosure code_cl(&cl, !CodeBlobToOopClosure::FixRelocations);
    ShenandoahRootEvacuator rp(this, 1);
    rp.process_evacuate_roots(&cl, &code_cl, 0);
  }
#endif
}

void ShenandoahHeap::do_evacuation() {
  parallel_evacuate();

  if (ShenandoahVerify && ! cancelled_concgc()) {
    VM_ShenandoahVerifyHeapAfterEvacuation verify_after_evacuation;
    if (Thread::current()->is_VM_thread()) {
      verify_after_evacuation.doit();
    } else {
      VMThread::execute(&verify_after_evacuation);
    }
  }
}

void ShenandoahHeap::parallel_evacuate() {
  log_develop_trace(gc)("starting parallel_evacuate");

  _shenandoah_policy->record_phase_start(ShenandoahCollectorPolicy::conc_evac);

  if (log_is_enabled(Trace, gc, region)) {
    ResourceMark rm;
    outputStream *out = Log(gc, region)::trace_stream();
    out->print("Printing all available regions");
    print_heap_regions(out);
  }

  if (log_is_enabled(Trace, gc, cset)) {
    ResourceMark rm;
    outputStream *out = Log(gc, cset)::trace_stream();
    out->print("Printing collection set which contains " SIZE_FORMAT " regions:\n", _collection_set->count());
    _collection_set->print(out);

    out->print("Printing free set which contains " SIZE_FORMAT " regions:\n", _free_regions->count());
    _free_regions->print(out);
  }

  ParallelEvacuationTask evacuationTask = ParallelEvacuationTask(this, _collection_set);

  workers()->run_task(&evacuationTask);

  if (log_is_enabled(Trace, gc, cset)) {
    ResourceMark rm;
    outputStream *out = Log(gc, cset)::trace_stream();
    out->print("Printing postgc collection set which contains " SIZE_FORMAT " regions:\n",
               _collection_set->count());
    _collection_set->print(out);

    out->print("Printing postgc free regions which contain " SIZE_FORMAT " free regions:\n",
               _free_regions->count());
    _free_regions->print(out);
  }

  if (log_is_enabled(Trace, gc, region)) {
    ResourceMark rm;
    outputStream *out = Log(gc, region)::trace_stream();
    out->print_cr("all regions after evacuation:");
    print_heap_regions(out);
  }

  _shenandoah_policy->record_phase_end(ShenandoahCollectorPolicy::conc_evac);
}

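// Verification closure: after evacuation, no reference may still point into
// the given from-region; used via verify_evacuation() below.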
class VerifyEvacuationClosure: public ExtendedOopClosure {
private:
  ShenandoahHeap*  _heap;
  ShenandoahHeapRegion* _from_region;

public:
  VerifyEvacuationClosure(ShenandoahHeapRegion* from_region) :
    _heap(ShenandoahHeap::heap()), _from_region(from_region) { }
private:
  template <class T>
  inline void do_oop_work(T* p) {
    oop heap_oop = oopDesc::load_decode_heap_oop(p);
    if (! oopDesc::is_null(heap_oop)) {
      guarantee(! _from_region->is_in(heap_oop), "no references to from-region allowed after evacuation: " PTR_FORMAT, p2i((HeapWord*) heap_oop));
    }
  }

public:
  void do_oop(oop* p)       {
    do_oop_work(p);
  }

  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

void ShenandoahHeap::roots_iterate(OopClosure* cl) {
  assert(SafepointSynchronize::is_at_safepoint(), "Only iterate roots while world is stopped");

  CodeBlobToOopClosure blobsCl(cl, false);
  CLDToOopClosure cldCl(cl);

  ShenandoahRootProcessor rp(this, 1);
  rp.process_all_roots(cl, NULL, &cldCl, &blobsCl, 0);
}

void ShenandoahHeap::verify_evacuation(ShenandoahHeapRegion* from_region) {
  VerifyEvacuationClosure rootsCl(from_region);
  roots_iterate(&rootsCl);
}

1425 
1426 bool ShenandoahHeap::supports_tlab_allocation() const {
1427   return true;
1428 }
1429 
1430 
size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
  size_t idx = _free_regions->current_index();
  ShenandoahHeapRegion* current = _free_regions->get_or_null(idx);
  if (current == NULL) {
    return 0;
  } else if (current->free() >= MinTLABSize) {
    // Current region has enough space left, can use it.
    return current->free();
  } else {
    // No more space in current region, peek next region
    return _free_regions->unsafe_peek_next_no_humongous();
  }
}

size_t ShenandoahHeap::max_tlab_size() const {
  return ShenandoahHeapRegion::region_size_bytes();
}

class ResizeGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    thread->gclab().resize();
  }
};

void ShenandoahHeap::resize_all_tlabs() {
  CollectedHeap::resize_all_tlabs();

  ResizeGCLABClosure cl;
  Threads::threads_do(&cl);
}

class AccumulateStatisticsGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    thread->gclab().accumulate_statistics();
    thread->gclab().initialize_statistics();
  }
};

void ShenandoahHeap::accumulate_statistics_all_gclabs() {
  AccumulateStatisticsGCLABClosure cl;
  Threads::threads_do(&cl);
}

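// Shenandoah does not use a card table, so the deferred card-marking
// machinery in CollectedHeap has nothing to do: TLAB stores need no card
// marks, initializing stores need no barrier, and new_store_pre_barrier()
// is a no-op.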
bool ShenandoahHeap::can_elide_tlab_store_barriers() const {
  return true;
}

oop ShenandoahHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) {
  // Overridden to do nothing.
  return new_obj;
}

bool ShenandoahHeap::can_elide_initializing_store_barrier(oop new_obj) {
  return true;
}

bool ShenandoahHeap::card_mark_must_follow_store() const {
  return false;
}

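// External collection requests. Explicit (user-requested) GCs are honored
// unless -XX:+DisableExplicitGC is set; allocation failures always schedule
// a full GC and clear all soft references. Both are handed off to the
// concurrent GC thread.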
void ShenandoahHeap::collect(GCCause::Cause cause) {
  assert(cause != GCCause::_gc_locker, "no JNI critical callback");
  if (GCCause::is_user_requested_gc(cause)) {
    if (! DisableExplicitGC) {
      _concurrent_gc_thread->do_full_gc(cause);
    }
  } else if (cause == GCCause::_allocation_failure) {
    collector_policy()->set_should_clear_all_soft_refs(true);
    _concurrent_gc_thread->do_full_gc(cause);
  }
}

void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {
  // Nothing to do here: full collections are scheduled through the
  // concurrent GC thread in collect() above.
}

AdaptiveSizePolicy* ShenandoahHeap::size_policy() {
  Unimplemented();
  return NULL;
}

CollectorPolicy* ShenandoahHeap::collector_policy() const {
  return _shenandoah_policy;
}

HeapWord* ShenandoahHeap::block_start(const void* addr) const {
  Space* sp = heap_region_containing(addr);
  if (sp != NULL) {
    return sp->block_start(addr);
  }
  return NULL;
}

size_t ShenandoahHeap::block_size(const HeapWord* addr) const {
  Space* sp = heap_region_containing(addr);
  assert(sp != NULL, "block_size of address outside of heap");
  return sp->block_size(addr);
}

bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
  Space* sp = heap_region_containing(addr);
  return sp->block_is_obj(addr);
}

jlong ShenandoahHeap::millis_since_last_gc() {
  return 0;
}

void ShenandoahHeap::prepare_for_verify() {
  if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
    ensure_parsability(false);
  }
}

void ShenandoahHeap::print_gc_threads_on(outputStream* st) const {
  workers()->print_worker_threads_on(st);
}

void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
  workers()->threads_do(tcl);
}

void ShenandoahHeap::print_tracing_info() const {
  if (log_is_enabled(Info, gc, stats)) {
    ResourceMark rm;
    outputStream* out = Log(gc, stats)::info_stream();
    _shenandoah_policy->print_tracing_info(out);
  }
}

class ShenandoahVerifyRootsClosure: public ExtendedOopClosure {
private:
  ShenandoahHeap*  _heap;
  VerifyOption     _vo;
  bool             _failures;
public:
  // _vo == UsePrevMarking -> use "prev" marking information,
  // _vo == UseNextMarking -> use "next" marking information,
  // _vo == UseMarkWord    -> use mark word from object header.
  ShenandoahVerifyRootsClosure(VerifyOption vo) :
    _heap(ShenandoahHeap::heap()),
    _vo(vo),
    _failures(false) { }

  bool failures() { return _failures; }

private:
  template <class T>
  inline void do_oop_work(T* p) {
    oop obj = oopDesc::load_decode_heap_oop(p);
    if (! oopDesc::is_null(obj) && ! obj->is_oop()) {
      // Record the failure and dump some diagnostics before the guarantee fires.
      _failures = true;
      tty->print_cr("Root location "PTR_FORMAT" verified "PTR_FORMAT,
                    p2i(p), p2i((void*) obj));
      //      obj->print_on(tty);
    }
    guarantee(obj->is_oop_or_null(), "is oop or null");
  }

public:
  void do_oop(oop* p)       {
    do_oop_work(p);
  }

  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

class ShenandoahVerifyHeapClosure: public ObjectClosure {
private:
  ShenandoahVerifyRootsClosure _rootsCl;
public:
  ShenandoahVerifyHeapClosure(ShenandoahVerifyRootsClosure rc) :
    _rootsCl(rc) {}

  void do_object(oop p) {
    _rootsCl.do_oop(&p);
  }
};

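// Heap verification entry point: walks all roots and all marked objects,
// checking that every reference points to a valid oop. Only runs at a
// safepoint (or when TLABs are off); otherwise verification is skipped.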
void ShenandoahHeap::verify(VerifyOption vo) {
  if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
    ShenandoahVerifyRootsClosure rootsCl(vo);

    assert(Thread::current()->is_VM_thread(),
           "Expected to be executed serially by the VM thread at this point");

    roots_iterate(&rootsCl);

    bool failures = rootsCl.failures();
    log_trace(gc)("verify failures: %s", BOOL_TO_STR(failures));

    ShenandoahVerifyHeapClosure heapCl(rootsCl);

    object_iterate(&heapCl);
    // TODO: Implement rest of it.
  } else {
    tty->print("(SKIPPING roots, heapRegions, remset) ");
  }
}

size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
  return _free_regions->capacity();
}

class ShenandoahIterateObjectClosureRegionClosure: public ShenandoahHeapRegionClosure {
  ObjectClosure* _cl;
public:
  ShenandoahIterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {}
  bool doHeapRegion(ShenandoahHeapRegion* r) {
    ShenandoahHeap::heap()->marked_object_iterate(r, _cl);
    return false;
  }
};

void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
  ShenandoahIterateObjectClosureRegionClosure blk(cl);
  heap_region_iterate(&blk, false, true);
}

class ShenandoahSafeObjectIterateAdjustPtrsClosure : public MetadataAwareOopClosure {
private:
  ShenandoahHeap* _heap;

public:
  ShenandoahSafeObjectIterateAdjustPtrsClosure() : _heap(ShenandoahHeap::heap()) {}

private:
  template <class T>
  inline void do_oop_work(T* p) {
    T o = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(o)) {
      oop obj = oopDesc::decode_heap_oop_not_null(o);
      oopDesc::encode_store_heap_oop(p, BrooksPointer::forwardee(obj));
    }
  }
public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }
  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

class ShenandoahSafeObjectIterateAndUpdate : public ObjectClosure {
private:
  ObjectClosure* _cl;
public:
  ShenandoahSafeObjectIterateAndUpdate(ObjectClosure *cl) : _cl(cl) {}

  virtual void do_object(oop obj) {
    assert (oopDesc::unsafe_equals(obj, BrooksPointer::forwardee(obj)),
            "avoid double-counting: only non-forwarded objects here");

    // Fix up the ptrs.
    ShenandoahSafeObjectIterateAdjustPtrsClosure adjust_ptrs;
    obj->oop_iterate(&adjust_ptrs);

    // The object is now safe to hand to the wrapped closure:
    _cl->do_object(obj);
  }
};

void ShenandoahHeap::safe_object_iterate(ObjectClosure* cl) {
  assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");

  // Safe iteration does objects only with correct references.
  // This is why we skip dirty regions that have stale copies of objects,
  // and fix up the pointers in the returned objects.

  ShenandoahSafeObjectIterateAndUpdate safe_cl(cl);
  ShenandoahIterateObjectClosureRegionClosure blk(&safe_cl);
  heap_region_iterate(&blk,
                      /* skip_dirty_regions = */ true,
                      /* skip_humongous_continuations = */ true);

  _need_update_refs = false; // already updated the references
}

// Apply blk->doHeapRegion() on all committed regions in address order,
// terminating the iteration early if doHeapRegion() returns true.
void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk, bool skip_dirty_regions, bool skip_humongous_continuation) const {
  for (size_t i = 0; i < _num_regions; i++) {
    ShenandoahHeapRegion* current = _ordered_regions->get(i);
    if (skip_humongous_continuation && current->is_humongous_continuation()) {
      continue;
    }
    if (skip_dirty_regions && in_collection_set(current)) {
      continue;
    }
    if (blk->doHeapRegion(current)) {
      return;
    }
  }
}

class ClearLivenessClosure : public ShenandoahHeapRegionClosure {
  ShenandoahHeap* sh;
public:
  ClearLivenessClosure(ShenandoahHeap* heap) : sh(heap) { }

  bool doHeapRegion(ShenandoahHeapRegion* r) {
    r->clear_live_data();
    sh->set_next_top_at_mark_start(r->bottom(), r->top());
    return false;
  }
};

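// Initial-mark preparation: retire TLABs (marking would otherwise miss
// objects allocated in them), reset per-region liveness data and
// top-at-mark-start pointers, clear the connection matrix if in use, and
// finally scan the root set to seed the concurrent marking queues.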
void ShenandoahHeap::start_concurrent_marking() {
  shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::accumulate_stats);
  accumulate_statistics_all_tlabs();
  shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::accumulate_stats);

  set_concurrent_mark_in_progress(true);
  // We need to reset all TLABs because we'd lose marks on all objects allocated in them.
  if (UseTLAB) {
    shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::make_parsable);
    ensure_parsability(true);
    shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::make_parsable);
  }

  _shenandoah_policy->record_bytes_allocated(_bytes_allocated_since_cm);
  _used_start_gc = used();

  shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::clear_liveness);
  ClearLivenessClosure clc(this);
  heap_region_iterate(&clc);
  shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::clear_liveness);

  if (UseShenandoahMatrix) {
    connection_matrix()->clear_all();
  }

  // Make above changes visible to worker threads
  OrderAccess::fence();

  concurrentMark()->init_mark_roots();
}

class VerifyAfterEvacuationClosure : public ExtendedOopClosure {
  ShenandoahHeap* _sh;

public:
  VerifyAfterEvacuationClosure() : _sh(ShenandoahHeap::heap()) {}

  template<class T> void do_oop_nv(T* p) {
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
      guarantee(_sh->in_collection_set(obj) == (! oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj))),
                "forwarded objects can only exist in dirty (from-space) regions. is_dirty: %s, is_forwarded: %s, obj-klass: %s, marked: %s",
                BOOL_TO_STR(_sh->in_collection_set(obj)),
                BOOL_TO_STR(! oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj))),
                obj->klass()->external_name(),
                BOOL_TO_STR(_sh->is_marked_complete(obj))
                );
      obj = oopDesc::bs()->read_barrier(obj);
      guarantee(! _sh->in_collection_set(obj), "forwarded oops must not point to dirty regions");
      guarantee(obj->is_oop(), "is_oop");
      guarantee(Metaspace::contains(obj->klass()), "klass pointer must go to metaspace");
    }
  }

  void do_oop(oop* p)       { do_oop_nv(p); }
  void do_oop(narrowOop* p) { do_oop_nv(p); }
};

void ShenandoahHeap::verify_heap_after_evacuation() {
  verify_heap_size_consistency();

  ensure_parsability(false);

  VerifyAfterEvacuationClosure cl;
  roots_iterate(&cl);

  ObjectToOopClosure objs(&cl);
  object_iterate(&objs);
}

void ShenandoahHeap::swap_mark_bitmaps() {
  // Swap bitmaps.
  CMBitMap* tmp1 = _complete_mark_bit_map;
  _complete_mark_bit_map = _next_mark_bit_map;
  _next_mark_bit_map = tmp1;

  // Swap top-at-mark-start pointers
  HeapWord** tmp2 = _complete_top_at_mark_starts;
  _complete_top_at_mark_starts = _next_top_at_mark_starts;
  _next_top_at_mark_starts = tmp2;

  HeapWord** tmp3 = _complete_top_at_mark_starts_base;
  _complete_top_at_mark_starts_base = _next_top_at_mark_starts_base;
  _next_top_at_mark_starts_base = tmp3;
}

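// Single-threaded reachability walk used by verification. Starting from the
// roots, traces the heap using a temporary bitmap as the marking wavefront
// and a single SCM queue as the work list. With _check_matrix enabled it
// also checks that every discovered edge is recorded in the connection
// matrix, dumping diagnostics for any missing edge before failing.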
class VerifyReachableHeapClosure : public ExtendedOopClosure {
private:
  SCMObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
  CMBitMap* _map;
  bool _check_matrix;
  oop _obj;
public:
  VerifyReachableHeapClosure(SCMObjToScanQueue* queue, CMBitMap* map, bool check_matrix) :
          _queue(queue), _heap(ShenandoahHeap::heap()), _map(map), _check_matrix(check_matrix) {}

  template <class T>
  void do_oop_work(T* p) {
    T o = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(o)) {
      oop obj = oopDesc::decode_heap_oop_not_null(o);
      guarantee(check_obj_alignment(obj), "sanity");

      guarantee(!oopDesc::is_null(obj), "sanity");
      guarantee(_heap->is_in(obj), "sanity");

      oop forw = BrooksPointer::forwardee(obj);
      guarantee(!oopDesc::is_null(forw), "sanity");
      guarantee(_heap->is_in(forw), "sanity");

      guarantee(oopDesc::unsafe_equals(obj, forw), "should not be forwarded");

      if (_check_matrix) {
        size_t from_idx = _heap->heap_region_index_containing(p);
        size_t to_idx = _heap->heap_region_index_containing(obj);
        if (!_heap->connection_matrix()->is_connected(from_idx, to_idx)) {
          tty->print_cr("from-obj: ");
          _obj->print_on(tty);
          tty->print_cr("to-obj:");
          obj->print_on(tty);
          tty->print_cr("from-obj allocated after mark: %s", BOOL_TO_STR(_heap->allocated_after_complete_mark_start((HeapWord*) _obj)));
          tty->print_cr("to-obj allocated after mark: %s", BOOL_TO_STR(_heap->allocated_after_complete_mark_start((HeapWord*) obj)));
          tty->print_cr("from-obj marked: %s", BOOL_TO_STR(_heap->is_marked_complete(_obj)));
          tty->print_cr("to-obj marked: %s", BOOL_TO_STR(_heap->is_marked_complete(obj)));
          tty->print_cr("from-idx: " SIZE_FORMAT ", to-idx: " SIZE_FORMAT, from_idx, to_idx);

          oop fwd_from = BrooksPointer::forwardee(_obj);
          oop fwd_to = BrooksPointer::forwardee(obj);
          tty->print_cr("from-obj forwardee: " PTR_FORMAT, p2i(fwd_from));
          tty->print_cr("to-obj forwardee: " PTR_FORMAT, p2i(fwd_to));
          tty->print_cr("forward(from-obj) marked: %s", BOOL_TO_STR(_heap->is_marked_complete(fwd_from)));
          tty->print_cr("forward(to-obj) marked: %s", BOOL_TO_STR(_heap->is_marked_complete(fwd_to)));
          size_t fwd_from_idx = _heap->heap_region_index_containing(fwd_from);
          size_t fwd_to_idx = _heap->heap_region_index_containing(fwd_to);
          tty->print_cr("forward(from-idx): " SIZE_FORMAT ", forward(to-idx): " SIZE_FORMAT, fwd_from_idx, fwd_to_idx);
          tty->print_cr("forward(from) connected with forward(to)? %s", BOOL_TO_STR(_heap->connection_matrix()->is_connected(fwd_from_idx, fwd_to_idx)));
        }
        guarantee(oopDesc::unsafe_equals(ShenandoahBarrierSet::resolve_oop_static_not_null(obj), obj), "must not be forwarded");
        guarantee(_heap->connection_matrix()->is_connected(from_idx, to_idx), "must be connected");
      }

      if (_map->parMark((HeapWord*) obj)) {
        _queue->push(SCMTask(obj));
      }
    }
  }

  void do_oop(oop* p) { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
  void set_obj(oop o) { _obj = o; }
};

void ShenandoahHeap::verify_heap_reachable_at_safepoint() {
  guarantee(SafepointSynchronize::is_at_safepoint(), "only when nothing else happens");
  guarantee(ShenandoahVerify || (UseShenandoahMatrix && VerifyShenandoahMatrix),
            "only when these are enabled, and bitmap is initialized in ShenandoahHeap::initialize");

  OrderAccess::fence();
  ensure_parsability(false);

  // Allocate temporary bitmap for storing marking wavefront:
  MemRegion mr = MemRegion(_verification_bit_map.startWord(), _verification_bit_map.endWord());
  _verification_bit_map.clear_range_large(mr);

  // Initialize a single queue
  SCMObjToScanQueue* q = new SCMObjToScanQueue();
  q->initialize();

  // Scan root set
  ShenandoahRootProcessor rp(this, 1);

  {
    VerifyReachableHeapClosure cl(q, &_verification_bit_map, false);
    CLDToOopClosure cld_cl(&cl);
    CodeBlobToOopClosure code_cl(&cl, ! CodeBlobToOopClosure::FixRelocations);
    rp.process_all_roots(&cl, &cl, &cld_cl, &code_cl, 0);
  }

  // Finish the scan
  {
    VerifyReachableHeapClosure cl(q, &_verification_bit_map, UseShenandoahMatrix && VerifyShenandoahMatrix);
    SCMTask task;
    while ((q->pop_buffer(task) ||
            q->pop_local(task) ||
            q->pop_overflow(task))) {
      oop obj = task.obj();
      assert(!oopDesc::is_null(obj), "must not be null");
      cl.set_obj(obj);
      obj->oop_iterate(&cl);
    }
  }

  // Clean up!
  delete q;
}

void ShenandoahHeap::stop_concurrent_marking() {
  assert(concurrent_mark_in_progress(), "How else could we get here?");
  if (! cancelled_concgc()) {
    // Marking finished without cancellation: swap the mark bitmaps and clear
    // the update-refs flag. If marking was cancelled, the flag stays set so
    // that reference updating can be finished later.
    set_need_update_refs(false);
    swap_mark_bitmaps();
  }
  set_concurrent_mark_in_progress(false);

  if (log_is_enabled(Trace, gc, region)) {
    ResourceMark rm;
    outputStream* out = Log(gc, region)::trace_stream();
    print_heap_regions(out);
  }
}

void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
  _concurrent_mark_in_progress = in_progress ? 1 : 0;
  JavaThread::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
}

void ShenandoahHeap::set_evacuation_in_progress_concurrently(bool in_progress) {
  // Note: it is important to first release the _evacuation_in_progress flag here,
  // so that Java threads can get out of oom_during_evacuation() and reach a safepoint,
  // in case a VM task is pending.
  set_evacuation_in_progress(in_progress);
  MutexLocker mu(Threads_lock);
  JavaThread::set_evacuation_in_progress_all_threads(in_progress);
}

void ShenandoahHeap::set_evacuation_in_progress_at_safepoint(bool in_progress) {
  assert(SafepointSynchronize::is_at_safepoint(), "Only call this at safepoint");
  set_evacuation_in_progress(in_progress);
  JavaThread::set_evacuation_in_progress_all_threads(in_progress);
}

void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
  _evacuation_in_progress = in_progress ? 1 : 0;
  OrderAccess::fence();
}

void ShenandoahHeap::verify_copy(oop p, oop c) {
  assert(! oopDesc::unsafe_equals(p, oopDesc::bs()->read_barrier(p)), "forwarded correctly");
  assert(oopDesc::unsafe_equals(oopDesc::bs()->read_barrier(p), c), "verify pointer is correct");
  if (p->klass() != c->klass()) {
    print_heap_regions();
  }
  assert(p->klass() == c->klass(), "verify class p-size: "INT32_FORMAT" c-size: "INT32_FORMAT, p->size(), c->size());
  assert(p->size() == c->size(), "verify size");
  // Object may have been locked between copy and verification
  //    assert(p->mark() == c->mark(), "verify mark");
  assert(oopDesc::unsafe_equals(c, oopDesc::bs()->read_barrier(c)), "verify only forwarded once");
}

void ShenandoahHeap::oom_during_evacuation() {
  log_develop_trace(gc)("Out of memory during evacuation, cancel evacuation, schedule full GC by thread %d",
                        Thread::current()->osthread()->thread_id());

  // We ran out of memory during evacuation. Cancel evacuation, and schedule a full-GC.
  collector_policy()->set_should_clear_all_soft_refs(true);
  concurrent_thread()->try_set_full_gc();
  cancel_concgc(_oom_evacuation);

  if ((! Thread::current()->is_GC_task_thread()) && (! Thread::current()->is_ConcurrentGC_thread())) {
    assert(! Threads_lock->owned_by_self()
           || SafepointSynchronize::is_at_safepoint(), "must not hold Threads_lock here");
    log_warning(gc)("OOM during evacuation. Let Java thread wait until evacuation finishes.");
    while (_evacuation_in_progress) { // wait.
      Thread::current()->_ParkEvent->park(1);
    }
  }
}

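// Every object is preceded by a Brooks forwarding pointer occupying one
// extra heap word. A TLAB allocation therefore reserves the word in front
// of the object and initializes it to point back at the object itself:
//
//   +------------------+-------------------+
//   | fwd pointer word | object body ...   |
//   +------------------+-------------------+
//   ^ obj              ^ result (handed back to the allocator)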
HeapWord* ShenandoahHeap::tlab_post_allocation_setup(HeapWord* obj) {
  // Initialize Brooks pointer for the next object
  HeapWord* result = obj + BrooksPointer::word_size();
  BrooksPointer::initialize(oop(result));
  return result;
}

uint ShenandoahHeap::oop_extra_words() {
  return BrooksPointer::word_size();
}

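// Heap expansion: commit storage for the requested number of new regions
// and initialize their bookkeeping (region table, collection-set fast-test,
// top-at-mark-start slots, free set). Callers must have checked the new
// region count against _max_regions already; see ensure_new_regions().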
void ShenandoahHeap::grow_heap_by(size_t num_regions) {
  size_t old_num_regions = _num_regions;
  ensure_new_regions(num_regions);
  for (size_t i = 0; i < num_regions; i++) {
    size_t new_region_index = i + old_num_regions;
    HeapWord* start = ((HeapWord*) base()) + (ShenandoahHeapRegion::region_size_bytes() / HeapWordSize) * new_region_index;
    ShenandoahHeapRegion* new_region = new ShenandoahHeapRegion(this, start, ShenandoahHeapRegion::region_size_bytes() / HeapWordSize, new_region_index);

    if (log_is_enabled(Trace, gc, region)) {
      ResourceMark rm;
      outputStream* out = Log(gc, region)::trace_stream();
      out->print_cr("allocating new region at index: "SIZE_FORMAT, new_region_index);
      new_region->print_on(out);
    }

    assert(_ordered_regions->active_regions() == new_region->region_number(), "must match");
    _ordered_regions->add_region(new_region);
    _in_cset_fast_test_base[new_region_index] = false; // Not in cset
    _next_top_at_mark_starts_base[new_region_index] = new_region->bottom();
    _complete_top_at_mark_starts_base[new_region_index] = new_region->bottom();

    _free_regions->add_region(new_region);
  }
}

void ShenandoahHeap::ensure_new_regions(size_t new_regions) {
  size_t num_regions = _num_regions;
  size_t new_num_regions = num_regions + new_regions;
  assert(new_num_regions <= _max_regions, "we checked this earlier");

  size_t expand_size = new_regions * ShenandoahHeapRegion::region_size_bytes();
  log_trace(gc, region)("expanding storage by "SIZE_FORMAT_HEX" bytes, for "SIZE_FORMAT" new regions", expand_size, new_regions);
  bool success = _storage.expand_by(expand_size, ShenandoahAlwaysPreTouch);
  assert(success, "should always be able to expand by requested size");

  _num_regions = new_num_regions;
}

ShenandoahForwardedIsAliveClosure::ShenandoahForwardedIsAliveClosure() :
  _heap(ShenandoahHeap::heap_no_check()) {
}

void ShenandoahForwardedIsAliveClosure::init(ShenandoahHeap* heap) {
  _heap = heap;
}

bool ShenandoahForwardedIsAliveClosure::do_object_b(oop obj) {
  assert(_heap != NULL, "sanity");
  obj = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
#ifdef ASSERT
  if (_heap->concurrent_mark_in_progress()) {
    assert(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)), "only query to-space");
  }
#endif
  assert(!oopDesc::is_null(obj), "null");
  return _heap->is_marked_next(obj);
}

void ShenandoahHeap::ref_processing_init() {
  MemRegion mr = reserved_region();

  isAlive.init(ShenandoahHeap::heap());
  assert(_max_workers > 0, "Sanity");

  _ref_processor =
    new ReferenceProcessor(mr,                      // span
                           ParallelRefProcEnabled,  // mt processing
                           _max_workers,            // degree of mt processing
                           true,                    // mt discovery
                           _max_workers,            // degree of mt discovery
                           false,                   // reference discovery is not atomic
                           &isAlive);
}

size_t ShenandoahHeap::num_regions() {
  return _num_regions;
}

size_t ShenandoahHeap::max_regions() {
  return _max_regions;
}

GCTracer* ShenandoahHeap::tracer() {
  return shenandoahPolicy()->tracer();
}

size_t ShenandoahHeap::tlab_used(Thread* thread) const {
  return _free_regions->used();
}

void ShenandoahHeap::cancel_concgc(GCCause::Cause cause) {
  if (try_cancel_concgc()) {
    log_info(gc)("Cancelling concurrent GC: %s", GCCause::to_string(cause));
    _shenandoah_policy->report_concgc_cancelled();
  }
}

void ShenandoahHeap::cancel_concgc(ShenandoahCancelCause cause) {
  if (try_cancel_concgc()) {
    log_info(gc)("Cancelling concurrent GC: %s", cancel_cause_to_string(cause));
    _shenandoah_policy->report_concgc_cancelled();
  }
}

const char* ShenandoahHeap::cancel_cause_to_string(ShenandoahCancelCause cause) {
  switch (cause) {
    case _oom_evacuation:
      return "Out of memory for evacuation";
    case _vm_stop:
      return "Stopping VM";
    default:
      return "Unknown";
  }
}

uint ShenandoahHeap::max_workers() {
  return _max_workers;
}

void ShenandoahHeap::stop() {
  // The shutdown sequence should be able to terminate even while GC is running.

  // Step 1. Notify control thread that we are in shutdown.
  // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown.
  // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
  _concurrent_gc_thread->prepare_for_graceful_shutdown();

  // Step 2. Notify GC workers that we are cancelling GC.
  cancel_concgc(_vm_stop);

  // Step 3. Wait until GC worker exits normally.
  _concurrent_gc_thread->stop();
}

void ShenandoahHeap::unload_classes_and_cleanup_tables() {
  ShenandoahForwardedIsAliveClosure is_alive;
  // Unload classes and purge SystemDictionary.
  bool purged_class = SystemDictionary::do_unloading(&is_alive, true);
  ParallelCleaningTask unlink_task(&is_alive, true, true, _workers->active_workers(), purged_class);
  _workers->run_task(&unlink_task);
  ClassLoaderDataGraph::purge();
}

void ShenandoahHeap::set_need_update_refs(bool need_update_refs) {
  _need_update_refs = need_update_refs;
}

// FIXME: this belongs in the heap region set.
ShenandoahHeapRegion* ShenandoahHeap::next_compaction_region(const ShenandoahHeapRegion* r) {
  size_t region_idx = r->region_number() + 1;
  ShenandoahHeapRegion* next = _ordered_regions->get(region_idx);
  guarantee(next->region_number() == region_idx, "region number must match");
  while (next->is_humongous()) {
    region_idx = next->region_number() + 1;
    next = _ordered_regions->get(region_idx);
    guarantee(next->region_number() == region_idx, "region number must match");
  }
  return next;
}

void ShenandoahHeap::set_region_in_collection_set(size_t region_index, bool b) {
  _in_cset_fast_test_base[region_index] = b;
}

ShenandoahMonitoringSupport* ShenandoahHeap::monitoring_support() {
  return _monitoring_support;
}

CMBitMap* ShenandoahHeap::complete_mark_bit_map() {
  return _complete_mark_bit_map;
}

CMBitMap* ShenandoahHeap::next_mark_bit_map() {
  return _next_mark_bit_map;
}

void ShenandoahHeap::add_free_region(ShenandoahHeapRegion* r) {
  _free_regions->add_region(r);
}

void ShenandoahHeap::clear_free_regions() {
  _free_regions->clear();
}

address ShenandoahHeap::in_cset_fast_test_addr() {
  return (address) (ShenandoahHeap::heap()->_in_cset_fast_test);
}

address ShenandoahHeap::cancelled_concgc_addr() {
  return (address) &(ShenandoahHeap::heap()->_cancelled_concgc);
}

void ShenandoahHeap::clear_cset_fast_test() {
  assert(_in_cset_fast_test_base != NULL, "sanity");
  memset(_in_cset_fast_test_base, false,
         _in_cset_fast_test_length * sizeof(bool));
}

size_t ShenandoahHeap::conservative_max_heap_alignment() {
  return ShenandoahMaxRegionSize;
}

size_t ShenandoahHeap::bytes_allocated_since_cm() {
  return _bytes_allocated_since_cm;
}

void ShenandoahHeap::set_bytes_allocated_since_cm(size_t bytes) {
  _bytes_allocated_since_cm = bytes;
}

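// Top-at-mark-start (TAMS) support. The slot for a region is found by
// shifting the region's base address right by the region size shift, so a
// lookup is a single shift and load. A sketch, assuming a (hypothetical)
// region size shift of 20, i.e. 1 MB regions:
//
//   HeapWord* base  = region->bottom();      // region base address
//   uintx     index = ((uintx) base) >> 20;  // slot in _next_top_at_mark_starts
//
// The separate *_base arrays hold the raw storage; see ClearLivenessClosure
// above, which resets next-TAMS to the region top when marking starts.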
void ShenandoahHeap::set_next_top_at_mark_start(HeapWord* region_base, HeapWord* addr) {
  uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_shift();
  _next_top_at_mark_starts[index] = addr;
}

HeapWord* ShenandoahHeap::next_top_at_mark_start(HeapWord* region_base) {
  uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_shift();
  return _next_top_at_mark_starts[index];
}

void ShenandoahHeap::set_complete_top_at_mark_start(HeapWord* region_base, HeapWord* addr) {
  uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_shift();
  _complete_top_at_mark_starts[index] = addr;
}

HeapWord* ShenandoahHeap::complete_top_at_mark_start(HeapWord* region_base) {
  uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_shift();
  return _complete_top_at_mark_starts[index];
}

void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
  _full_gc_in_progress = in_progress;
}

bool ShenandoahHeap::is_full_gc_in_progress() const {
  return _full_gc_in_progress;
}

void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
  _update_refs_in_progress = in_progress;
}

bool ShenandoahHeap::is_update_refs_in_progress() const {
  return _update_refs_in_progress;
}

class NMethodOopInitializer : public OopClosure {
private:
  ShenandoahHeap* _heap;
public:
  NMethodOopInitializer() : _heap(ShenandoahHeap::heap()) {
  }

private:
  template <class T>
  inline void do_oop_work(T* p) {
    T o = oopDesc::load_heap_oop(p);
    if (! oopDesc::is_null(o)) {
      oop obj1 = oopDesc::decode_heap_oop_not_null(o);
      oop obj2 = oopDesc::bs()->write_barrier(obj1);
      if (! oopDesc::unsafe_equals(obj1, obj2)) {
        oopDesc::encode_store_heap_oop(p, obj2);
      }
    }
  }

public:
  void do_oop(oop* o) {
    do_oop_work(o);
  }
  void do_oop(narrowOop* o) {
    do_oop_work(o);
  }
};

void ShenandoahHeap::register_nmethod(nmethod* nm) {
  NMethodOopInitializer init;
  nm->oops_do(&init);
  nm->fix_oop_relocations();
}

void ShenandoahHeap::unregister_nmethod(nmethod* nm) {
}

void ShenandoahHeap::pin_object(oop o) {
  heap_region_containing(o)->pin();
}

void ShenandoahHeap::unpin_object(oop o) {
  heap_region_containing(o)->unpin();
}

GCTimer* ShenandoahHeap::gc_timer() const {
  return _gc_timer;
}

class ShenandoahCountGarbageClosure : public ShenandoahHeapRegionClosure {
private:
  size_t _garbage;
public:
  ShenandoahCountGarbageClosure() : _garbage(0) {
  }

  bool doHeapRegion(ShenandoahHeapRegion* r) {
    if (! r->is_humongous() && ! r->is_pinned() && ! r->in_collection_set()) {
      _garbage += r->garbage();
    }
    return false;
  }

  size_t garbage() {
    return _garbage;
  }
};

size_t ShenandoahHeap::garbage() {
  ShenandoahCountGarbageClosure cl;
  heap_region_iterate(&cl);
  return cl.garbage();
}

ShenandoahConnectionMatrix* ShenandoahHeap::connection_matrix() {
  return _connection_matrix;
}

ShenandoahPartialGC* ShenandoahHeap::partial_gc() {
  return _partial_gc;
}

void ShenandoahHeap::do_partial_collection() {
  partial_gc()->do_partial_collection();
}

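// Parallel update-references task, parameterized over the closure type so
// the same driver works with and without connection-matrix updates. Workers
// claim regions one at a time: for collection-set regions only the mark
// bitmap below top-at-mark-start is cleared (their objects are stale
// copies); all other non-empty regions have the references inside their
// marked objects updated to point to to-space.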
template<class T>
class ShenandoahUpdateHeapRefsTask : public AbstractGangTask {
private:
  T cl;
  ShenandoahHeap* _heap;
  ShenandoahHeapRegionSet* _regions;

public:
  ShenandoahUpdateHeapRefsTask(ShenandoahHeapRegionSet* regions) :
    AbstractGangTask("Concurrent Update References Task"),
    cl(T()),
    _heap(ShenandoahHeap::heap()),
    _regions(regions) {
  }

  void work(uint worker_id) {
    ShenandoahHeapRegion* r = _regions->claim_next();
    while (r != NULL) {
      if (_heap->in_collection_set(r)) {
        HeapWord* bottom = r->bottom();
        HeapWord* top = _heap->complete_top_at_mark_start(r->bottom());
        if (top > bottom) {
          _heap->complete_mark_bit_map()->clear_range_large(MemRegion(bottom, top));
        }
      } else {
        if (!r->is_empty()) {
          _heap->marked_object_oop_safe_iterate(r, &cl);
        }
      }
      if (_heap->cancelled_concgc()) {
        return;
      }
      r = _regions->claim_next();
    }
  }
};

void ShenandoahHeap::update_heap_references(ShenandoahHeapRegionSet* update_regions) {
  if (UseShenandoahMatrix) {
    ShenandoahUpdateHeapRefsTask<ShenandoahUpdateHeapRefsMatrixClosure> task(update_regions);
    workers()->run_task(&task);
  } else {
    ShenandoahUpdateHeapRefsTask<ShenandoahUpdateHeapRefsClosure> task(update_regions);
    workers()->run_task(&task);
  }
}

void ShenandoahHeap::concurrent_update_heap_references() {
  _shenandoah_policy->record_phase_start(ShenandoahCollectorPolicy::conc_update_refs);
  ShenandoahHeapRegionSet* update_regions = regions();
  update_regions->clear_current_index();
  update_heap_references(update_regions);
  _shenandoah_policy->record_phase_end(ShenandoahCollectorPolicy::conc_update_refs);
}

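// Update-references runs in three steps: prepare_update_refs() executes at a
// safepoint and flips the heap from evacuation to update-refs mode,
// concurrent_update_heap_references() does the bulk of the work concurrently,
// and finish_update_refs() executes at the final safepoint to update roots,
// recycle dirty regions and rebuild the free set.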
void ShenandoahHeap::prepare_update_refs() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  set_evacuation_in_progress_at_safepoint(false);
  set_update_refs_in_progress(true);
  ensure_parsability(true);
  if (UseShenandoahMatrix) {
    connection_matrix()->clear_all();
  }
  for (size_t i = 0; i < _num_regions; i++) {
    ShenandoahHeapRegion* r = _ordered_regions->get(i);
    r->set_concurrent_iteration_safe_limit(r->top());
  }
}

void ShenandoahHeap::finish_update_refs() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  if (cancelled_concgc()) {
    // Finish updating references where we left off.
    clear_cancelled_concgc();
    ShenandoahHeapRegionSet* update_regions = regions();
    update_heap_references(update_regions);
  }

  assert(! cancelled_concgc(), "Should have been done right before");
  concurrentMark()->update_roots(ShenandoahCollectorPolicy::final_update_refs_roots);

  // Allocations might have happened before we STWed here, record peak:
  shenandoahPolicy()->record_peak_occupancy();

  recycle_dirty_regions();
  set_need_update_refs(false);

  if (ShenandoahVerify) {
    verify_update_refs();
  }

  {
    // Rebuild the free set
    ShenandoahHeapLock hl(this);
    _free_regions->clear();
    size_t end = _ordered_regions->active_regions();
    for (size_t i = 0; i < end; i++) {
      ShenandoahHeapRegion* r = _ordered_regions->get(i);
      if (!r->is_humongous()) {
        assert (!in_collection_set(r), "collection set should be clear");
        _free_regions->add_region(r);
      }
    }
  }
  set_update_refs_in_progress(false);
}

class ShenandoahVerifyUpdateRefsClosure : public ExtendedOopClosure {
private:
  template <class T>
  void do_oop_work(T* p) {
    T o = oopDesc::load_heap_oop(p);
    if (! oopDesc::is_null(o)) {
      oop obj = oopDesc::decode_heap_oop_not_null(o);
      guarantee(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)),
                "must not be forwarded");
    }
  }
public:
  void do_oop(oop* p) { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
};

void ShenandoahHeap::verify_update_refs() {
  ensure_parsability(false);

  ShenandoahVerifyUpdateRefsClosure cl;

  // Verify roots.
  {
    CodeBlobToOopClosure blobsCl(&cl, false);
    CLDToOopClosure cldCl(&cl);
    ShenandoahRootProcessor rp(this, 1);
    rp.process_all_roots(&cl, &cl, &cldCl, &blobsCl, 0);
  }

  // Verify heap.
  for (size_t i = 0; i < num_regions(); i++) {
    ShenandoahHeapRegion* r = regions()->get(i);
    marked_object_oop_iterate(r, &cl);
  }
}

#ifdef ASSERT
void ShenandoahHeap::assert_heaplock_owned_by_current_thread() {
  assert(_heap_lock == locked, "must be locked");
  assert(_heap_lock_owner == Thread::current(), "must be owned by current thread");
}

void ShenandoahHeap::assert_heaplock_or_safepoint() {
  Thread* thr = Thread::current();
  assert((_heap_lock == locked && _heap_lock_owner == thr) ||
         (SafepointSynchronize::is_at_safepoint() && thr->is_VM_thread()),
         "must own heap lock, or be the VM thread at a safepoint");
}
#endif

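// Deferred region recycling. Instead of recycling regions one by one (which
// would touch the connection matrix once per region), callers bracket a
// batch with start_deferred_recycling()/finish_deferred_recycle() so the
// matrix can be cleared for the whole batch at once. All three calls
// require the heap lock.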
void ShenandoahHeap::start_deferred_recycling() {
  assert_heaplock_owned_by_current_thread();
  _recycled_region_count = 0;
}

void ShenandoahHeap::defer_recycle(ShenandoahHeapRegion* r) {
  assert_heaplock_owned_by_current_thread();
  _recycled_regions[_recycled_region_count++] = r->region_number();
}

void ShenandoahHeap::finish_deferred_recycle() {
  assert_heaplock_owned_by_current_thread();
  if (UseShenandoahMatrix) {
    for (size_t i = 0; i < _recycled_region_count; i++) {
      regions()->get(_recycled_regions[i])->recycle_no_matrix();
    }
    connection_matrix()->clear_batched(_recycled_regions, _recycled_region_count);
  } else {
    for (size_t i = 0; i < _recycled_region_count; i++) {
      regions()->get(_recycled_regions[i])->recycle();
    }
  }
}