/*
 * Copyright (c) 2013, 2015, Red Hat, Inc. and/or its affiliates.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.hpp"

#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/parallelCleaning.hpp"

#include "gc/shenandoah/brooksPointer.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc/shenandoah/shenandoahConcurrentThread.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahHumongous.hpp"
#include "gc/shenandoah/shenandoahMarkCompact.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahPartialGC.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.hpp"
#include "gc/shenandoah/vm_operations_shenandoah.hpp"

#include "runtime/vmThread.hpp"
#include "services/mallocTracker.hpp"

SCMUpdateRefsClosure::SCMUpdateRefsClosure() : _heap(ShenandoahHeap::heap()) {}

#ifdef ASSERT
template <class T>
void AssertToSpaceClosure::do_oop_nv(T* p) {
  T o = oopDesc::load_heap_oop(p);
  if (! oopDesc::is_null(o)) {
    oop obj = oopDesc::decode_heap_oop_not_null(o);
    assert(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)),
           "need to-space object here obj: "PTR_FORMAT", rb(obj): "PTR_FORMAT", p: "PTR_FORMAT,
           p2i(obj), p2i(ShenandoahBarrierSet::resolve_oop_static_not_null(obj)), p2i(p));
  }
}

void AssertToSpaceClosure::do_oop(narrowOop* p) { do_oop_nv(p); }
void AssertToSpaceClosure::do_oop(oop* p)       { do_oop_nv(p); }
#endif

const char* ShenandoahHeap::name() const {
  return "Shenandoah";
}

void ShenandoahHeap::print_heap_locations(HeapWord* start, HeapWord* end) {
  HeapWord* cur = NULL;
  for (cur = start; cur < end; cur++) {
    tty->print_cr(PTR_FORMAT" : "PTR_FORMAT, p2i(cur), p2i(*((HeapWord**) cur)));
  }
}

class ShenandoahPretouchTask : public AbstractGangTask {
private:
  ShenandoahHeapRegionSet* _regions;
  const size_t _bitmap_size;
  const size_t _page_size;
  char* _bitmap0_base;
  char* _bitmap1_base;
public:
  ShenandoahPretouchTask(ShenandoahHeapRegionSet* regions,
                         char* bitmap0_base, char* bitmap1_base, size_t bitmap_size,
                         size_t page_size) :
    AbstractGangTask("Shenandoah PreTouch",
                     Universe::is_fully_initialized() ? GCId::current_raw() :
                                                        // During VM initialization there is
                                                        // no GC cycle that this task can be
                                                        // associated with.
                                                        GCId::undefined()),
    _regions(regions),
    _bitmap_size(bitmap_size),
    _page_size(page_size),
    _bitmap0_base(bitmap0_base),
    _bitmap1_base(bitmap1_base) {
    _regions->clear_current_index();
  }

  virtual void work(uint worker_id) {
    ShenandoahHeapRegion* r = _regions->claim_next();
    while (r != NULL) {
      log_trace(gc, heap)("Pretouch region " SIZE_FORMAT ": " PTR_FORMAT " -> " PTR_FORMAT,
                          r->region_number(), p2i(r->bottom()), p2i(r->end()));
      os::pretouch_memory(r->bottom(), r->end(), _page_size);

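      // Each bitmap byte covers heap_map_factor() heap bytes, so the bitmap
      // slice backing this region starts and ends at proportional offsets.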
      size_t start = r->region_number()       * ShenandoahHeapRegion::region_size_bytes() / CMBitMap::heap_map_factor();
      size_t end   = (r->region_number() + 1) * ShenandoahHeapRegion::region_size_bytes() / CMBitMap::heap_map_factor();
      assert (end <= _bitmap_size, "end is sane: " SIZE_FORMAT " <= " SIZE_FORMAT, end, _bitmap_size);

      log_trace(gc, heap)("Pretouch bitmap under region " SIZE_FORMAT ": " PTR_FORMAT " -> " PTR_FORMAT,
                          r->region_number(), p2i(_bitmap0_base + start), p2i(_bitmap0_base + end));
      os::pretouch_memory(_bitmap0_base + start, _bitmap0_base + end, _page_size);

      log_trace(gc, heap)("Pretouch bitmap under region " SIZE_FORMAT ": " PTR_FORMAT " -> " PTR_FORMAT,
                          r->region_number(), p2i(_bitmap1_base + start), p2i(_bitmap1_base + end));
      os::pretouch_memory(_bitmap1_base + start, _bitmap1_base + end, _page_size);

      r = _regions->claim_next();
    }
  }
};

jint ShenandoahHeap::initialize() {
  CollectedHeap::pre_initialize();

  BrooksPointer::initial_checks();

  size_t init_byte_size = collector_policy()->initial_heap_byte_size();
  size_t max_byte_size = collector_policy()->max_heap_byte_size();

  Universe::check_alignment(max_byte_size,
                            ShenandoahHeapRegion::region_size_bytes(),
                            "shenandoah heap");
  Universe::check_alignment(init_byte_size,
                            ShenandoahHeapRegion::region_size_bytes(),
                            "shenandoah heap");

  ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
                                                 Arguments::conservative_max_heap_alignment());
  initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*) (heap_rs.base() + heap_rs.size()));

  set_barrier_set(new ShenandoahBarrierSet(this));
  ReservedSpace pgc_rs = heap_rs.first_part(max_byte_size);
  _storage.initialize(pgc_rs, init_byte_size);

  _num_regions = init_byte_size / ShenandoahHeapRegion::region_size_bytes();
  _max_regions = max_byte_size / ShenandoahHeapRegion::region_size_bytes();
  _initialSize = _num_regions * ShenandoahHeapRegion::region_size_bytes();
  size_t regionSizeWords = ShenandoahHeapRegion::region_size_bytes() / HeapWordSize;
  assert(init_byte_size == _initialSize, "tautology");
  _ordered_regions = new ShenandoahHeapRegionSet(_max_regions);
  _collection_set = new ShenandoahCollectionSet(_max_regions);
  _free_regions = new ShenandoahFreeSet(_max_regions);

  // Initialize fast collection set test structure.
  _in_cset_fast_test_length = _max_regions;
  _in_cset_fast_test_base =
                   NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length, mtGC);
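  // Bias the array base by the heap start so it can be indexed directly by
  // (address >> region_size_shift) without subtracting the heap base on
  // every query. The top-at-mark-start arrays below use the same trick.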
  _in_cset_fast_test = _in_cset_fast_test_base -
               ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::region_size_shift());

  _next_top_at_mark_starts_base =
                   NEW_C_HEAP_ARRAY(HeapWord*, _max_regions, mtGC);
  _next_top_at_mark_starts = _next_top_at_mark_starts_base -
               ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::region_size_shift());

  _complete_top_at_mark_starts_base =
                   NEW_C_HEAP_ARRAY(HeapWord*, _max_regions, mtGC);
  _complete_top_at_mark_starts = _complete_top_at_mark_starts_base -
               ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::region_size_shift());

  size_t i = 0;
  for (i = 0; i < _num_regions; i++) {
    _in_cset_fast_test_base[i] = false; // Not in cset
    HeapWord* bottom = (HeapWord*) pgc_rs.base() + regionSizeWords * i;
    _complete_top_at_mark_starts_base[i] = bottom;
    _next_top_at_mark_starts_base[i] = bottom;
  }

  {
    ShenandoahHeapLock lock(this);
    for (i = 0; i < _num_regions; i++) {
      ShenandoahHeapRegion* current = new ShenandoahHeapRegion(this, (HeapWord*) pgc_rs.base() +
                                                               regionSizeWords * i, regionSizeWords, i);
      _free_regions->add_region(current);
      _ordered_regions->add_region(current);
    }
  }
  assert(((size_t) _ordered_regions->active_regions()) == _num_regions, "");
  _first_region = _ordered_regions->get(0);
  assert((((size_t) base()) &
          (ShenandoahHeapRegion::region_size_bytes() - 1)) == 0,
         "misaligned heap: "PTR_FORMAT, p2i(base()));

  if (log_is_enabled(Trace, gc, region)) {
    ResourceMark rm;
    outputStream* out = Log(gc, region)::trace_stream();
    log_trace(gc, region)("All Regions");
    _ordered_regions->print(out);
    log_trace(gc, region)("Free Regions");
    _free_regions->print(out);
  }

  _recycled_regions = NEW_C_HEAP_ARRAY(size_t, _max_regions, mtGC);
  _recycled_region_count = 0;

  // The call below uses machinery (the SATB queue set) that lives in G1,
  // but probably belongs in a shared location.
  JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
                                               SATB_Q_FL_lock,
                                               20 /* G1SATBProcessCompletedThreshold */,
                                               Shared_SATB_Q_lock);

  // Reserve space for the complete and next mark bitmaps.
  _bitmap_size = CMBitMap::compute_size(heap_rs.size());
  _heap_region = MemRegion((HeapWord*) heap_rs.base(), heap_rs.size() / HeapWordSize);

  size_t page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();

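  // Both mark bitmaps are reserved and committed eagerly; their roles as
  // "next" and "complete" bitmap are swapped between marking cycles rather
  // than copying bits around.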
  ReservedSpace bitmap0(_bitmap_size, page_size);
  os::commit_memory_or_exit(bitmap0.base(), bitmap0.size(), false, "couldn't allocate mark bitmap");
  MemTracker::record_virtual_memory_type(bitmap0.base(), mtGC);
  MemRegion bitmap_region0 = MemRegion((HeapWord*) bitmap0.base(), bitmap0.size() / HeapWordSize);

  ReservedSpace bitmap1(_bitmap_size, page_size);
  os::commit_memory_or_exit(bitmap1.base(), bitmap1.size(), false, "couldn't allocate mark bitmap");
  MemTracker::record_virtual_memory_type(bitmap1.base(), mtGC);
  MemRegion bitmap_region1 = MemRegion((HeapWord*) bitmap1.base(), bitmap1.size() / HeapWordSize);

  if (ShenandoahVerify || (UseShenandoahMatrix && VerifyShenandoahMatrix)) {
    ReservedSpace verify_bitmap(_bitmap_size, page_size);
    os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), false,
                              "couldn't allocate verification bitmap");
    MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
    MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
    _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
  }

  if (ShenandoahAlwaysPreTouch) {
    assert (!AlwaysPreTouch, "Should have been overridden");

    // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
    // before initialize() below zeroes it with the initializing thread. For any given region,
    // we touch the region and the corresponding bitmaps from the same thread.

    log_info(gc, heap)("Parallel pretouch " SIZE_FORMAT " regions with " SIZE_FORMAT " byte pages",
                       _ordered_regions->count(), page_size);
    ShenandoahPretouchTask cl(_ordered_regions, bitmap0.base(), bitmap1.base(), _bitmap_size, page_size);
    _workers->run_task(&cl);
  }

  _mark_bit_map0.initialize(_heap_region, bitmap_region0);
  _complete_mark_bit_map = &_mark_bit_map0;

  _mark_bit_map1.initialize(_heap_region, bitmap_region1);
  _next_mark_bit_map = &_mark_bit_map1;

  _connection_matrix = new ShenandoahConnectionMatrix(_max_regions);
  _partial_gc = new ShenandoahPartialGC(this, _max_regions);

  _monitoring_support = new ShenandoahMonitoringSupport(this);

  _concurrent_gc_thread = new ShenandoahConcurrentThread();

  ShenandoahMarkCompact::initialize();

  return JNI_OK;
}

ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
  CollectedHeap(),
  _shenandoah_policy(policy),
  _concurrent_mark_in_progress(0),
  _evacuation_in_progress(0),
  _full_gc_in_progress(false),
  _update_refs_in_progress(false),
  _free_regions(NULL),
  _collection_set(NULL),
  _bytes_allocated_since_cm(0),
  _bytes_allocated_during_cm(0),
  _allocated_last_gc(0),
  _used_start_gc(0),
  _max_workers(MAX2(ConcGCThreads, ParallelGCThreads)),
  _ref_processor(NULL),
  _in_cset_fast_test(NULL),
  _in_cset_fast_test_base(NULL),
  _next_top_at_mark_starts(NULL),
  _next_top_at_mark_starts_base(NULL),
  _complete_top_at_mark_starts(NULL),
  _complete_top_at_mark_starts_base(NULL),
  _mark_bit_map0(),
  _mark_bit_map1(),
  _connection_matrix(NULL),
  _cancelled_concgc(false),
  _need_update_refs(false),
  _need_reset_bitmaps(false),
  _heap_lock(0),
#ifdef ASSERT
  _heap_lock_owner(NULL),
#endif
  _gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer())
{
  log_info(gc, init)("Parallel GC threads: "UINT32_FORMAT, ParallelGCThreads);
  log_info(gc, init)("Concurrent GC threads: "UINT32_FORMAT, ConcGCThreads);
  log_info(gc, init)("Parallel reference processing enabled: %s", BOOL_TO_STR(ParallelRefProcEnabled));

  _scm = new ShenandoahConcurrentMark();
  _used = 0;

  _max_workers = MAX2(_max_workers, 1U);
  _workers = new ShenandoahWorkGang("Shenandoah GC Threads", _max_workers,
                            /* are_GC_task_threads */true,
                            /* are_ConcurrentGC_threads */false);
  if (_workers == NULL) {
    vm_exit_during_initialization("Failed necessary allocation.");
  } else {
    _workers->initialize_workers();
  }
}

class ResetNextBitmapTask : public AbstractGangTask {
private:
  ShenandoahHeapRegionSet* _regions;

public:
  ResetNextBitmapTask(ShenandoahHeapRegionSet* regions) :
    AbstractGangTask("Parallel Reset Bitmap Task"),
    _regions(regions) {
    _regions->clear_current_index();
  }

  void work(uint worker_id) {
    ShenandoahHeapRegion* region = _regions->claim_next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    while (region != NULL) {
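      // Marks only exist below the top-at-mark-start, so clearing just
      // [bottom, TAMS) avoids touching bitmap words that are already clean.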
      HeapWord* bottom = region->bottom();
      HeapWord* top = heap->next_top_at_mark_start(region->bottom());
      if (top > bottom) {
        heap->next_mark_bit_map()->clear_range_large(MemRegion(bottom, top));
      }
      region = _regions->claim_next();
    }
  }
};

void ShenandoahHeap::reset_next_mark_bitmap(WorkGang* workers) {
  ResetNextBitmapTask task = ResetNextBitmapTask(_ordered_regions);
  workers->run_task(&task);
}

class ResetCompleteBitmapTask : public AbstractGangTask {
private:
  ShenandoahHeapRegionSet* _regions;

public:
  ResetCompleteBitmapTask(ShenandoahHeapRegionSet* regions) :
    AbstractGangTask("Parallel Reset Bitmap Task"),
    _regions(regions) {
    _regions->clear_current_index();
  }

  void work(uint worker_id) {
    ShenandoahHeapRegion* region = _regions->claim_next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    while (region != NULL) {
      HeapWord* bottom = region->bottom();
      HeapWord* top = heap->complete_top_at_mark_start(region->bottom());
      if (top > bottom) {
        heap->complete_mark_bit_map()->clear_range_large(MemRegion(bottom, top));
      }
      region = _regions->claim_next();
    }
  }
};

void ShenandoahHeap::reset_complete_mark_bitmap(WorkGang* workers) {
  ResetCompleteBitmapTask task = ResetCompleteBitmapTask(_ordered_regions);
  workers->run_task(&task);
}

bool ShenandoahHeap::is_next_bitmap_clear() {
  HeapWord* start = _ordered_regions->bottom();
  HeapWord* end = _ordered_regions->end();
  return _next_mark_bit_map->getNextMarkedWordAddress(start, end) == end;
}

bool ShenandoahHeap::is_complete_bitmap_clear_range(HeapWord* start, HeapWord* end) {
  return _complete_mark_bit_map->getNextMarkedWordAddress(start, end) == end;
}

void ShenandoahHeap::print_on(outputStream* st) const {
  st->print("Shenandoah Heap");
  st->print(" total = " SIZE_FORMAT " K, used " SIZE_FORMAT " K ", capacity() / K, used() / K);
  st->print(" [" PTR_FORMAT ", " PTR_FORMAT ") ",
            p2i(reserved_region().start()),
            p2i(reserved_region().end()));
  st->print("Region size = " SIZE_FORMAT "K ", ShenandoahHeapRegion::region_size_bytes() / K);
  if (_concurrent_mark_in_progress) {
    st->print("marking ");
  }
  if (_evacuation_in_progress) {
    st->print("evacuating ");
  }
  if (cancelled_concgc()) {
    st->print("cancelled ");
  }
  st->print("\n");

  // Adapted from VirtualSpace::print_on(), which is non-PRODUCT only
  st->print("Virtual space:");
  if (_storage.special()) st->print(" (pinned in memory)");
  st->cr();
  st->print_cr(" - committed: " SIZE_FORMAT, _storage.committed_size());
  st->print_cr(" - reserved:  " SIZE_FORMAT, _storage.reserved_size());
  st->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  p2i(_storage.low()), p2i(_storage.high()));
  st->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  p2i(_storage.low_boundary()), p2i(_storage.high_boundary()));

  if (Verbose) {
    print_heap_regions(st);
  }
}

class InitGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    thread->gclab().initialize(true);
  }
};

void ShenandoahHeap::post_initialize() {
  if (UseTLAB) {
    // This is a very tricky point in VM lifetime. We cannot easily call Threads::threads_do
    // here, because some system threads (VMThread, WatcherThread, etc) are not yet available.
    // Their initialization should be handled separately. If we miss some threads here,
    // then any other TLAB-related activity would fail with asserts.

    InitGCLABClosure init_gclabs;
    {
      MutexLocker ml(Threads_lock);
      for (JavaThread *thread = Threads::first(); thread != NULL; thread = thread->next()) {
        init_gclabs.do_thread(thread);
      }
    }
    gc_threads_do(&init_gclabs);

    // The gclab cannot be initialized early during VM startup, because it cannot
    // determine its max_size yet. Instead, let the WorkGang initialize the gclab
    // when each new worker is created.
    _workers->set_initialize_gclab();
  }

  _scm->initialize(_max_workers);

  ref_processing_init();
}

class CalculateUsedRegionClosure : public ShenandoahHeapRegionClosure {
  size_t sum;
public:
  CalculateUsedRegionClosure() {
    sum = 0;
  }

  bool doHeapRegion(ShenandoahHeapRegion* r) {
    sum = sum + r->used();
    return false;
  }

  size_t getResult() { return sum; }
};

size_t ShenandoahHeap::calculateUsed() {
  CalculateUsedRegionClosure cl;
  heap_region_iterate(&cl);
  return cl.getResult();
}

void ShenandoahHeap::verify_heap_size_consistency() {
  assert(calculateUsed() == used(),
         "heap used size must be consistent heap-used: "SIZE_FORMAT" regions-used: "SIZE_FORMAT, used(), calculateUsed());
}

size_t ShenandoahHeap::used() const {
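  // _used is updated under the heap lock or at a safepoint, but read here
  // without the lock; the fence conservatively orders this unlocked read.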
  OrderAccess::acquire();
  return _used;
}

void ShenandoahHeap::increase_used(size_t bytes) {
  assert_heaplock_or_safepoint();
  _used += bytes;
}

void ShenandoahHeap::set_used(size_t bytes) {
  assert_heaplock_or_safepoint();
  _used = bytes;
}

void ShenandoahHeap::decrease_used(size_t bytes) {
  assert_heaplock_or_safepoint();
  assert(_used >= bytes, "never decrease heap size by more than we've left");
  _used -= bytes;
}

size_t ShenandoahHeap::capacity() const {
  return _num_regions * ShenandoahHeapRegion::region_size_bytes();
}

bool ShenandoahHeap::is_maximal_no_gc() const {
  Unimplemented();
  return true;
}

size_t ShenandoahHeap::max_capacity() const {
  return _max_regions * ShenandoahHeapRegion::region_size_bytes();
}

size_t ShenandoahHeap::min_capacity() const {
  return _initialSize;
}

VirtualSpace* ShenandoahHeap::storage() const {
  return (VirtualSpace*) &_storage;
}

bool ShenandoahHeap::is_in(const void* p) const {
  HeapWord* heap_base = (HeapWord*) base();
  HeapWord* last_region_end = heap_base + (ShenandoahHeapRegion::region_size_bytes() / HeapWordSize) * _num_regions;
  return p >= heap_base && p < last_region_end;
}

bool ShenandoahHeap::is_scavengable(const void* p) {
  return true;
}

HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
  // Retain tlab and allocate object in shared space if
  // the amount free in the tlab is too large to discard.
  if (thread->gclab().free() > thread->gclab().refill_waste_limit()) {
    thread->gclab().record_slow_allocation(size);
    return NULL;
  }

  // Discard gclab and allocate a new one.
  // To minimize fragmentation, the last GCLAB may be smaller than the rest.
  size_t new_gclab_size = thread->gclab().compute_size(size);

  thread->gclab().clear_before_allocation();

  if (new_gclab_size == 0) {
    return NULL;
  }

  // Allocate a new GCLAB...
  HeapWord* obj = allocate_new_gclab(new_gclab_size);
  if (obj == NULL) {
    return NULL;
  }

  if (ZeroTLAB) {
    // ..and clear it.
    Copy::zero_to_words(obj, new_gclab_size);
  } else {
    // ...and zap just allocated object.
#ifdef ASSERT
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(obj + hdr_size, new_gclab_size - hdr_size, badHeapWordVal);
#endif // ASSERT
  }
  thread->gclab().fill(obj, obj + size, new_gclab_size);
  return obj;
}

HeapWord* ShenandoahHeap::allocate_new_tlab(size_t word_size) {
#ifdef ASSERT
  log_debug(gc, alloc)("Allocate new tlab, requested size = " SIZE_FORMAT " bytes", word_size * HeapWordSize);
#endif
  return allocate_new_tlab(word_size, false);
}

HeapWord* ShenandoahHeap::allocate_new_gclab(size_t word_size) {
#ifdef ASSERT
  log_debug(gc, alloc)("Allocate new gclab, requested size = " SIZE_FORMAT " bytes", word_size * HeapWordSize);
#endif
  return allocate_new_tlab(word_size, true);
}

HeapWord* ShenandoahHeap::allocate_new_tlab(size_t word_size, bool evacuating) {
  HeapWord* result = allocate_memory(word_size, evacuating);

  if (result != NULL) {
    assert(! in_collection_set(result), "Never allocate in dirty region");
    _bytes_allocated_since_cm += word_size * HeapWordSize;

    log_develop_trace(gc, tlab)("allocating new tlab of size "SIZE_FORMAT" at addr "PTR_FORMAT, word_size, p2i(result));
  }
  return result;
}

ShenandoahHeap* ShenandoahHeap::heap() {
  CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to ShenandoahHeap::heap()");
  assert(heap->kind() == CollectedHeap::ShenandoahHeap, "not a shenandoah heap");
  return (ShenandoahHeap*) heap;
}

ShenandoahHeap* ShenandoahHeap::heap_no_check() {
  CollectedHeap* heap = Universe::heap();
  return (ShenandoahHeap*) heap;
}

HeapWord* ShenandoahHeap::allocate_memory_work(size_t word_size) {
  ShenandoahHeapLock heap_lock(this);

  HeapWord* result = allocate_memory_under_lock(word_size);
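  // Number of regions, rounded up, that would cover the request; used to
  // grow the heap in large-enough steps when allocation fails.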
  size_t grow_by = (word_size * HeapWordSize + ShenandoahHeapRegion::region_size_bytes() - 1) / ShenandoahHeapRegion::region_size_bytes();

  while (result == NULL && _num_regions + grow_by <= _max_regions) {
    grow_heap_by(grow_by);
    result = allocate_memory_under_lock(word_size);
  }

  return result;
}

HeapWord* ShenandoahHeap::allocate_memory(size_t word_size, bool evacuating) {
  HeapWord* result = NULL;
  result = allocate_memory_work(word_size);

  if (!evacuating) {
    // Allocation failed, try full-GC, then retry allocation.
    //
    // It might happen that one of the threads requesting allocation would unblock
    // way later after full-GC happened, only to fail the second allocation, because
    // other threads have already depleted the free storage. In this case, a better
    // strategy would be to try full-GC again.
    //
    // Lacking a way to detect progress from the "collect" call, we are left with blindly
    // retrying for some bounded number of times.
    // TODO: Poll if Full GC made enough progress to warrant retry.
    int tries = 0;
    while ((result == NULL) && (tries++ < ShenandoahFullGCTries)) {
      log_debug(gc)("[" PTR_FORMAT "] Failed to allocate " SIZE_FORMAT " bytes, doing full GC, try %d",
                    p2i(Thread::current()), word_size * HeapWordSize, tries);
      collect(GCCause::_allocation_failure);
      result = allocate_memory_work(word_size);
    }
  }

  // Only update monitoring counters when not calling from a write-barrier.
  // Otherwise we might attempt to grab the Service_lock, which we must
  // not do when coming from a write-barrier (because the thread might
  // already hold the Compile_lock).
  if (! evacuating) {
    monitoring_support()->update_counters();
  }

  log_develop_trace(gc, alloc)("allocate memory chunk of size "SIZE_FORMAT" at addr "PTR_FORMAT " by thread %d ",
                               word_size, p2i(result), Thread::current()->osthread()->thread_id());

  return result;
}

HeapWord* ShenandoahHeap::allocate_memory_under_lock(size_t word_size) {
  assert_heaplock_owned_by_current_thread();

  if (word_size * HeapWordSize > ShenandoahHeapRegion::region_size_bytes()) {
    return allocate_large_memory(word_size);
  }

  // Not enough memory in free region set.
  // Coming out of full GC, it is possible that there is no
  // free region available, so current_index may not be valid.
  if (word_size * HeapWordSize > _free_regions->capacity()) return NULL;

  ShenandoahHeapRegion* my_current_region = _free_regions->current_no_humongous();

  if (my_current_region == NULL) {
    return NULL; // No more room to make a new region. OOM.
  }
  assert(my_current_region != NULL, "should have a region at this point");

#ifdef ASSERT
  if (in_collection_set(my_current_region)) {
    print_heap_regions();
  }
#endif
  assert(! in_collection_set(my_current_region), "never get targeted regions in free-lists");
  assert(! my_current_region->is_humongous(), "never attempt to allocate from humongous object regions");

  HeapWord* result = my_current_region->allocate(word_size);

  while (result == NULL) {
    // 2nd attempt. Try next region.
#ifdef ASSERT
    if (my_current_region->free() > 0) {
      log_debug(gc, alloc)("Retire region with " SIZE_FORMAT " bytes free", my_current_region->free());
    }
#endif
    _free_regions->increase_used(my_current_region->free());
    ShenandoahHeapRegion* next_region = _free_regions->next_no_humongous();
    assert(next_region != my_current_region, "must not get current again");
    my_current_region = next_region;

    if (my_current_region == NULL) {
      return NULL; // No more room to make a new region. OOM.
    }
    assert(my_current_region != NULL, "should have a region at this point");
    assert(! in_collection_set(my_current_region), "never get targeted regions in free-lists");
    assert(! my_current_region->is_humongous(), "never attempt to allocate from humongous object regions");
    result = my_current_region->allocate(word_size);
  }

  my_current_region->increase_live_data_words(word_size);
  increase_used(word_size * HeapWordSize);
  _free_regions->increase_used(word_size * HeapWordSize);
  return result;
}

HeapWord* ShenandoahHeap::allocate_large_memory(size_t words) {
  assert_heaplock_owned_by_current_thread();

  size_t required_regions = ShenandoahHumongous::required_regions(words * HeapWordSize);
  if (required_regions > _max_regions) return NULL;

  ShenandoahHeapRegion* r = _free_regions->allocate_contiguous(required_regions);

  HeapWord* result = NULL;

  if (r != NULL)  {
    result = r->bottom();

    log_debug(gc, humongous)("allocating humongous object of size: "SIZE_FORMAT" KB at location "PTR_FORMAT" in start region "SIZE_FORMAT,
                             (words * HeapWordSize) / K, p2i(result), r->region_number());
  } else {
    log_debug(gc, humongous)("allocating humongous object of size: "SIZE_FORMAT" KB at location "PTR_FORMAT" failed",
                             (words * HeapWordSize) / K, p2i(result));
  }

  return result;
}

HeapWord* ShenandoahHeap::mem_allocate(size_t size,
                                       bool* gc_overhead_limit_was_exceeded) {
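  // Allocate one extra word in front of the object for the Brooks forwarding
  // pointer; the object itself starts just past that word.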
  HeapWord* filler = allocate_memory(size + BrooksPointer::word_size(), false);
  if (filler != NULL) {
    HeapWord* result = filler + BrooksPointer::word_size();
    BrooksPointer::initialize(oop(result));
    _bytes_allocated_since_cm += size * HeapWordSize;

    assert(! in_collection_set(result), "never allocate in targeted region");
    return result;
  } else {
    /*
    tty->print_cr("Out of memory. Requested number of words: "SIZE_FORMAT" used heap: "INT64_FORMAT", bytes allocated since last CM: "INT64_FORMAT,
                  size, used(), _bytes_allocated_since_cm);
    {
      print_heap_regions();
      tty->print("Printing "SIZE_FORMAT" free regions:\n", _free_regions->count());
      _free_regions->print();
    }
    */
    return NULL;
  }
}

class ParallelEvacuateRegionObjectClosure : public ObjectClosure {
private:
  ShenandoahHeap* _heap;
  Thread* _thread;
public:
  ParallelEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
    _heap(heap), _thread(Thread::current()) {
  }

  void do_object(oop p) {
    log_develop_trace(gc, compaction)("Calling ParallelEvacuateRegionObjectClosure on "PTR_FORMAT" of size %d",
                                      p2i((HeapWord*) p), p->size());

    assert(_heap->is_marked_complete(p), "expect only marked objects");
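    // Copy only objects that are not forwarded yet; an object that already
    // resolves to a different address was evacuated by another thread.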
    if (oopDesc::unsafe_equals(p, ShenandoahBarrierSet::resolve_oop_static_not_null(p))) {
      bool evac;
      _heap->evacuate_object(p, _thread, evac);
    }
  }
};

#ifdef ASSERT
class VerifyEvacuatedObjectClosure : public ObjectClosure {
public:
  void do_object(oop p) {
    if (ShenandoahHeap::heap()->is_marked_complete(p)) {
      oop p_prime = oopDesc::bs()->read_barrier(p);
      assert(! oopDesc::unsafe_equals(p, p_prime), "Should point to evacuated copy");
      if (p->klass() != p_prime->klass()) {
        tty->print_cr("copy has different class than original:");
        p->klass()->print_on(tty);
        p_prime->klass()->print_on(tty);
      }
      assert(p->klass() == p_prime->klass(), "Should have the same class p: "PTR_FORMAT", p_prime: "PTR_FORMAT, p2i(p), p2i(p_prime));
      //      assert(p->mark() == p_prime->mark(), "Should have the same mark");
      assert(p->size() == p_prime->size(), "Should be the same size");
      assert(oopDesc::unsafe_equals(p_prime, oopDesc::bs()->read_barrier(p_prime)), "One forward once");
    }
  }
};

void ShenandoahHeap::verify_evacuated_region(ShenandoahHeapRegion* from_region) {
  VerifyEvacuatedObjectClosure verify_evacuation;
  marked_object_iterate(from_region, &verify_evacuation);
}
#endif

void ShenandoahHeap::parallel_evacuate_region(ShenandoahHeapRegion* from_region) {
  assert(from_region->has_live(), "all-garbage regions are reclaimed earlier");

  ParallelEvacuateRegionObjectClosure evacuate_region(this);

  marked_object_iterate(from_region, &evacuate_region);

#ifdef ASSERT
  if (ShenandoahVerify && ! cancelled_concgc()) {
    verify_evacuated_region(from_region);
  }
#endif
}

class ParallelEvacuationTask : public AbstractGangTask {
private:
  ShenandoahHeap* _sh;
  ShenandoahCollectionSet* _cs;

public:
  ParallelEvacuationTask(ShenandoahHeap* sh,
                         ShenandoahCollectionSet* cs) :
    AbstractGangTask("Parallel Evacuation Task"),
    _sh(sh),
    _cs(cs) {}

  void work(uint worker_id) {
    ShenandoahHeapRegion* from_hr = _cs->claim_next();

    while (from_hr != NULL) {
      log_develop_trace(gc, region)("Thread "INT32_FORMAT" claimed Heap Region "SIZE_FORMAT,
                                    worker_id,
                                    from_hr->region_number());

      assert(from_hr->has_live(), "all-garbage regions are reclaimed early");
      _sh->parallel_evacuate_region(from_hr);

      if (_sh->cancelled_concgc()) {
        log_develop_trace(gc, region)("Cancelled concgc while evacuating region " SIZE_FORMAT,
                                      from_hr->region_number());
        break;
      }
      from_hr = _cs->claim_next();
    }
  }
};

void ShenandoahHeap::recycle_dirty_regions() {
  ShenandoahHeapLock lock(this);

  size_t bytes_reclaimed = 0;

  ShenandoahHeapRegionSet* set = regions();
  set->clear_current_index();

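  // Recycling is deferred: regions are queued while walking the set, then
  // flipped to free in one batch by finish_deferred_recycle() below.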
  start_deferred_recycling();

  ShenandoahHeapRegion* r = set->claim_next();
  while (r != NULL) {
    if (in_collection_set(r)) {
      decrease_used(r->used());
      bytes_reclaimed += r->used();
      defer_recycle(r);
    }
    r = set->claim_next();
  }

  finish_deferred_recycle();

  _shenandoah_policy->record_bytes_reclaimed(bytes_reclaimed);
  if (! cancelled_concgc()) {
    clear_cset_fast_test();
  }
}

ShenandoahFreeSet* ShenandoahHeap::free_regions() {
  return _free_regions;
}

void ShenandoahHeap::print_heap_regions(outputStream* st) const {
  _ordered_regions->print(st);
}

class PrintAllRefsOopClosure: public ExtendedOopClosure {
private:
  int _index;
  const char* _prefix;

public:
  PrintAllRefsOopClosure(const char* prefix) : _index(0), _prefix(prefix) {}

private:
  template <class T>
  inline void do_oop_work(T* p) {
    oop o = oopDesc::load_decode_heap_oop(p);
    if (o != NULL) {
      if (ShenandoahHeap::heap()->is_in(o) && o->is_oop()) {
        tty->print_cr("%s "INT32_FORMAT" ("PTR_FORMAT")-> "PTR_FORMAT" (marked: %s) (%s "PTR_FORMAT")",
                      _prefix, _index,
                      p2i(p), p2i(o),
                      BOOL_TO_STR(ShenandoahHeap::heap()->is_marked_complete(o)),
                      o->klass()->internal_name(), p2i(o->klass()));
      } else {
        tty->print_cr("%s "INT32_FORMAT" ("PTR_FORMAT") -> "PTR_FORMAT" (not in heap, possibly corrupted or dirty)",
                      _prefix, _index,
                      p2i(p), p2i(o));
      }
    } else {
      tty->print_cr("%s "INT32_FORMAT" ("PTR_FORMAT") -> "PTR_FORMAT, _prefix, _index, p2i(p), p2i((HeapWord*) o));
    }
    _index++;
  }

public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }

  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

class PrintAllRefsObjectClosure : public ObjectClosure {
  const char* _prefix;

public:
  PrintAllRefsObjectClosure(const char* prefix) : _prefix(prefix) {}

  void do_object(oop p) {
    if (ShenandoahHeap::heap()->is_in(p)) {
      tty->print_cr("%s object "PTR_FORMAT" (marked: %s) (%s "PTR_FORMAT") refers to:",
                    _prefix, p2i(p),
                    BOOL_TO_STR(ShenandoahHeap::heap()->is_marked_complete(p)),
                    p->klass()->internal_name(), p2i(p->klass()));
      PrintAllRefsOopClosure cl(_prefix);
      p->oop_iterate(&cl);
    }
  }
};

void ShenandoahHeap::print_all_refs(const char* prefix) {
  tty->print_cr("printing all references in the heap");
  tty->print_cr("root references:");

  ensure_parsability(false);

  PrintAllRefsOopClosure cl(prefix);
  roots_iterate(&cl);

  tty->print_cr("heap references:");
  PrintAllRefsObjectClosure cl2(prefix);
  object_iterate(&cl2);
}

class VerifyAfterMarkingOopClosure: public ExtendedOopClosure {
private:
  ShenandoahHeap*  _heap;

public:
  VerifyAfterMarkingOopClosure() :
    _heap(ShenandoahHeap::heap()) { }

private:
  template <class T>
  inline void do_oop_work(T* p) {
    oop o = oopDesc::load_decode_heap_oop(p);
    if (o != NULL) {
      if (! _heap->is_marked_complete(o)) {
        _heap->print_heap_regions();
        _heap->print_all_refs("post-mark");
        tty->print_cr("oop not marked, although referrer is marked: "PTR_FORMAT": in_heap: %s, is_marked: %s",
                      p2i((HeapWord*) o), BOOL_TO_STR(_heap->is_in(o)), BOOL_TO_STR(_heap->is_marked_complete(o)));
        _heap->print_heap_locations((HeapWord*) o, (HeapWord*) o + o->size());

        tty->print_cr("oop class: %s", o->klass()->internal_name());
        if (_heap->is_in(p)) {
          oop referrer = oop(_heap->heap_region_containing(p)->block_start_const(p));
          tty->print_cr("Referrer starts at addr "PTR_FORMAT, p2i((HeapWord*) referrer));
          referrer->print();
          _heap->print_heap_locations((HeapWord*) referrer, (HeapWord*) referrer + referrer->size());
        }
        tty->print_cr("heap region containing object:");
        _heap->heap_region_containing(o)->print();
        tty->print_cr("heap region containing referrer:");
        _heap->heap_region_containing(p)->print();
        tty->print_cr("heap region containing forwardee:");
        _heap->heap_region_containing(oopDesc::bs()->read_barrier(o))->print();
      }
      assert(o->is_oop(), "oop must be an oop");
      assert(Metaspace::contains(o->klass()), "klass pointer must go to metaspace");
      if (! oopDesc::unsafe_equals(o, oopDesc::bs()->read_barrier(o))) {
        tty->print_cr("oop has forwardee: p: "PTR_FORMAT" (%s), o = "PTR_FORMAT" (%s), new-o: "PTR_FORMAT" (%s)",
                      p2i(p),
                      BOOL_TO_STR(_heap->in_collection_set(p)),
                      p2i(o),
                      BOOL_TO_STR(_heap->in_collection_set(o)),
                      p2i((HeapWord*) oopDesc::bs()->read_barrier(o)),
                      BOOL_TO_STR(_heap->in_collection_set(oopDesc::bs()->read_barrier(o))));
        tty->print_cr("oop class: %s", o->klass()->internal_name());
      }
      assert(oopDesc::unsafe_equals(o, oopDesc::bs()->read_barrier(o)), "oops must not be forwarded");
      assert(! _heap->in_collection_set(o), "references must not point to dirty heap regions");
      assert(_heap->is_marked_complete(o), "live oops must be marked current");
    }
  }

public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }

  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

void ShenandoahHeap::verify_heap_after_marking() {
  verify_heap_size_consistency();

  log_trace(gc)("verifying heap after marking");

  VerifyAfterMarkingOopClosure cl;
  roots_iterate(&cl);
  ObjectToOopClosure objs(&cl);
  object_iterate(&objs);
}

void ShenandoahHeap::reclaim_humongous_region_at(ShenandoahHeapRegion* r) {
  assert(r->is_humongous_start(), "reclaim regions starting with the first one");

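  // The object sits one Brooks-pointer word past the region bottom; include
  // that word when computing how many regions the object spans.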
  oop humongous_obj = oop(r->bottom() + BrooksPointer::word_size());
  size_t size = humongous_obj->size() + BrooksPointer::word_size();
  size_t required_regions = ShenandoahHumongous::required_regions(size * HeapWordSize);
  size_t index = r->region_number();

  assert(!r->has_live(), "liveness must be zero");

  for (size_t i = 0; i < required_regions; i++) {
    ShenandoahHeapRegion* region = _ordered_regions->get(index++);

    assert((region->is_humongous_start() || region->is_humongous_continuation()),
           "expect correct humongous start or continuation");

    if (log_is_enabled(Debug, gc, humongous)) {
      log_debug(gc, humongous)("reclaiming "SIZE_FORMAT" humongous regions for object of size: "SIZE_FORMAT" words", required_regions, size);
      ResourceMark rm;
      outputStream* out = Log(gc, humongous)::debug_stream();
      region->print_on(out);
    }

    region->recycle();
    ShenandoahHeap::heap()->decrease_used(ShenandoahHeapRegion::region_size_bytes());
  }
}

class ShenandoahReclaimHumongousRegionsClosure : public ShenandoahHeapRegionClosure {
  bool doHeapRegion(ShenandoahHeapRegion* r) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();

    if (r->is_humongous_start()) {
      oop humongous_obj = oop(r->bottom() + BrooksPointer::word_size());
      if (! heap->is_marked_complete(humongous_obj)) {
        heap->reclaim_humongous_region_at(r);
      }
    }
    return false;
  }
};

#ifdef ASSERT
class CheckCollectionSetClosure: public ShenandoahHeapRegionClosure {
  bool doHeapRegion(ShenandoahHeapRegion* r) {
    assert(! ShenandoahHeap::heap()->in_collection_set(r), "Should have been cleared by now");
    return false;
  }
};
#endif

void ShenandoahHeap::prepare_for_concurrent_evacuation() {
  assert(_ordered_regions->get(0)->region_number() == 0, "FIXME CHF. FIXME CHF!");

  log_develop_trace(gc)("Thread %d started prepare_for_concurrent_evacuation", Thread::current()->osthread()->thread_id());

  if (!cancelled_concgc()) {
    // Allocations might have happened before we STWed here, record peak:
    shenandoahPolicy()->record_peak_occupancy();

    recycle_dirty_regions();

    ensure_parsability(true);

    if (UseShenandoahMatrix && PrintShenandoahMatrix) {
      outputStream* log = Log(gc)::info_stream();
      connection_matrix()->print_on(log);
    }

    if (ShenandoahVerify || (UseShenandoahMatrix && VerifyShenandoahMatrix)) {
      verify_heap_reachable_at_safepoint();
    }

#ifdef ASSERT
    if (ShenandoahVerify) {
      verify_heap_after_marking();
    }
#endif

    // NOTE: This needs to be done during a stop the world pause, because
    // putting regions into the collection set concurrently with Java threads
    // will create a race. In particular, acmp could fail because when we
    // resolve the first operand, the containing region might not yet be in
    // the collection set, and thus return the original oop. When the 2nd
    // operand gets resolved, the region could be in the collection set
    // and the oop gets evacuated. If both operands had originally been
    // the same, we would get false negatives.

    {
      ShenandoahHeapLock lock(this);
      _collection_set->clear();
      _free_regions->clear();

      ShenandoahReclaimHumongousRegionsClosure reclaim;
      heap_region_iterate(&reclaim);

#ifdef ASSERT
      CheckCollectionSetClosure ccsc;
      _ordered_regions->heap_region_iterate(&ccsc);
#endif

      _shenandoah_policy->choose_collection_set(_collection_set);

      _shenandoah_policy->choose_free_set(_free_regions);
    }

    _bytes_allocated_since_cm = 0;

    Universe::update_heap_info_at_gc();
  }
}

class RetireTLABClosure : public ThreadClosure {
private:
  bool _retire;

public:
  RetireTLABClosure(bool retire) : _retire(retire) {
  }

  void do_thread(Thread* thread) {
    thread->gclab().make_parsable(_retire);
  }
};

void ShenandoahHeap::ensure_parsability(bool retire_tlabs) {
  if (UseTLAB) {
    CollectedHeap::ensure_parsability(retire_tlabs);
    RetireTLABClosure cl(retire_tlabs);
    Threads::threads_do(&cl);
  }
}

class ShenandoahEvacuateUpdateRootsClosure: public ExtendedOopClosure {
private:
  ShenandoahHeap* _heap;
  Thread* _thread;
public:
  ShenandoahEvacuateUpdateRootsClosure() :
    _heap(ShenandoahHeap::heap()), _thread(Thread::current()) {
  }

private:
  template <class T>
  void do_oop_work(T* p) {
    assert(_heap->is_evacuation_in_progress(), "Only do this when evacuation is in progress");

    T o = oopDesc::load_heap_oop(p);
    if (! oopDesc::is_null(o)) {
      oop obj = oopDesc::decode_heap_oop_not_null(o);
      if (_heap->in_collection_set(obj)) {
        assert(_heap->is_marked_complete(obj), "only evacuate marked objects %d %d",
               _heap->is_marked_complete(obj), _heap->is_marked_complete(ShenandoahBarrierSet::resolve_oop_static_not_null(obj)));
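        // If the object still resolves to itself it has not been copied yet;
        // evacuate_object() returns the winning copy even if another thread
        // races us to evacuate it.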
        oop resolved = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
        if (oopDesc::unsafe_equals(resolved, obj)) {
          bool evac;
          resolved = _heap->evacuate_object(obj, _thread, evac);
        }
        oopDesc::encode_store_heap_oop(p, resolved);
      }
    }
#ifdef ASSERT
    else {
      // tty->print_cr("not updating root at: "PTR_FORMAT" with object: "PTR_FORMAT", is_in_heap: %s, is_in_cset: %s, is_marked: %s",
      //               p2i(p),
      //               p2i((HeapWord*) obj),
      //               BOOL_TO_STR(_heap->is_in(obj)),
      //               BOOL_TO_STR(_heap->in_cset_fast_test(obj)),
      //               BOOL_TO_STR(_heap->is_marked_complete(obj)));
    }
#endif
  }

public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }
  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

class ShenandoahEvacuateUpdateRootsTask : public AbstractGangTask {
  ShenandoahRootEvacuator* _rp;
public:
  ShenandoahEvacuateUpdateRootsTask(ShenandoahRootEvacuator* rp) :
    AbstractGangTask("Shenandoah evacuate and update roots"),
    _rp(rp)
  {
    // Nothing else to do.
  }

  void work(uint worker_id) {
    ShenandoahEvacuateUpdateRootsClosure cl;
    MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);

    _rp->process_evacuate_roots(&cl, &blobsCl, worker_id);
  }
};

class ShenandoahFixRootsTask : public AbstractGangTask {
  ShenandoahRootEvacuator* _rp;
public:
  ShenandoahFixRootsTask(ShenandoahRootEvacuator* rp) :
    AbstractGangTask("Shenandoah update roots"),
    _rp(rp)
  {
    // Nothing else to do.
  }

  void work(uint worker_id) {
    SCMUpdateRefsClosure cl;
    MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);

    _rp->process_evacuate_roots(&cl, &blobsCl, worker_id);
  }
};

void ShenandoahHeap::evacuate_and_update_roots() {
  COMPILER2_PRESENT(DerivedPointerTable::clear());

  assert(SafepointSynchronize::is_at_safepoint(), "Only iterate roots while world is stopped");

  {
    ShenandoahRootEvacuator rp(this, workers()->active_workers(), ShenandoahCollectorPolicy::init_evac);
    ShenandoahEvacuateUpdateRootsTask roots_task(&rp);
    workers()->run_task(&roots_task);
  }

  COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

  if (cancelled_concgc()) {
    // If initial evacuation has been cancelled, we need to update all references
    // after all workers have finished. Otherwise we might run into the following problem:
    // GC thread 1 cannot allocate anymore, thus evacuation fails and leaves a from-space
    // ptr of object X in a root oop*. GC thread 2 evacuates the same object X to to-space,
    // which leaves a truly dangling from-space reference in the first root oop*. This must
    // not happen.
    // clear() and update_pointers() must always be called in pairs,
    // cannot nest with the above clear()/update_pointers().
    COMPILER2_PRESENT(DerivedPointerTable::clear());
    ShenandoahRootEvacuator rp(this, workers()->active_workers(), ShenandoahCollectorPolicy::init_evac);
    ShenandoahFixRootsTask update_roots_task(&rp);
    workers()->run_task(&update_roots_task);
    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
  }

#ifdef ASSERT
  {
    AssertToSpaceClosure cl;
    CodeBlobToOopClosure code_cl(&cl, !CodeBlobToOopClosure::FixRelocations);
    ShenandoahRootEvacuator rp(this, 1);
    rp.process_evacuate_roots(&cl, &code_cl, 0);
  }
#endif
}

void ShenandoahHeap::do_evacuation() {
  parallel_evacuate();

  if (ShenandoahVerify && ! cancelled_concgc()) {
    VM_ShenandoahVerifyHeapAfterEvacuation verify_after_evacuation;
    if (Thread::current()->is_VM_thread()) {
      verify_after_evacuation.doit();
    } else {
      VMThread::execute(&verify_after_evacuation);
    }
  }
}

void ShenandoahHeap::parallel_evacuate() {
  log_develop_trace(gc)("starting parallel_evacuate");

  _shenandoah_policy->record_phase_start(ShenandoahCollectorPolicy::conc_evac);

  if (log_is_enabled(Trace, gc, region)) {
    ResourceMark rm;
    outputStream *out = Log(gc, region)::trace_stream();
    out->print_cr("Printing all available regions");
    print_heap_regions(out);
  }

  if (log_is_enabled(Trace, gc, cset)) {
    ResourceMark rm;
    outputStream *out = Log(gc, cset)::trace_stream();
    out->print("Printing collection set which contains "SIZE_FORMAT" regions:\n", _collection_set->count());
    _collection_set->print(out);

    out->print("Printing free set which contains "SIZE_FORMAT" regions:\n", _free_regions->count());
    _free_regions->print(out);
  }

  ParallelEvacuationTask evacuationTask = ParallelEvacuationTask(this, _collection_set);

  workers()->run_task(&evacuationTask);

  if (log_is_enabled(Trace, gc, cset)) {
    ResourceMark rm;
    outputStream *out = Log(gc, cset)::trace_stream();
    out->print("Printing postgc collection set which contains "SIZE_FORMAT" regions:\n",
               _collection_set->count());
    _collection_set->print(out);

    out->print("Printing postgc free regions which contain "SIZE_FORMAT" free regions:\n",
               _free_regions->count());
    _free_regions->print(out);
  }

  if (log_is_enabled(Trace, gc, region)) {
    ResourceMark rm;
    outputStream *out = Log(gc, region)::trace_stream();
    out->print_cr("all regions after evacuation:");
    print_heap_regions(out);
  }

  _shenandoah_policy->record_phase_end(ShenandoahCollectorPolicy::conc_evac);
}
1396 
1397 class VerifyEvacuationClosure: public ExtendedOopClosure {
1398 private:
1399   ShenandoahHeap*  _heap;
1400   ShenandoahHeapRegion* _from_region;
1401 
1402 public:
1403   VerifyEvacuationClosure(ShenandoahHeapRegion* from_region) :
1404     _heap(ShenandoahHeap::heap()), _from_region(from_region) { }
1405 private:
1406   template <class T>
1407   inline void do_oop_work(T* p) {
1408     oop heap_oop = oopDesc::load_decode_heap_oop(p);
1409     if (! oopDesc::is_null(heap_oop)) {
1410       guarantee(! _from_region->is_in(heap_oop), "no references to from-region allowed after evacuation: "PTR_FORMAT, p2i((HeapWord*) heap_oop));
1411     }
1412   }
1413 
1414 public:
1415   void do_oop(oop* p)       {
1416     do_oop_work(p);
1417   }
1418 
1419   void do_oop(narrowOop* p) {
1420     do_oop_work(p);
1421   }
1422 
1423 };
1424 
1425 void ShenandoahHeap::roots_iterate(OopClosure* cl) {
1426 
1427   assert(SafepointSynchronize::is_at_safepoint(), "Only iterate roots while world is stopped");
1428 
1429   CodeBlobToOopClosure blobsCl(cl, false);
1430   CLDToOopClosure cldCl(cl);
1431 
1432   ShenandoahRootProcessor rp(this, 1);
1433   rp.process_all_roots(cl, NULL, &cldCl, &blobsCl, 0);
1434 }
1435 
void ShenandoahHeap::verify_evacuation(ShenandoahHeapRegion* from_region) {
  VerifyEvacuationClosure rootsCl(from_region);
  roots_iterate(&rootsCl);
}
1442 
1443 bool ShenandoahHeap::supports_tlab_allocation() const {
1444   return true;
}
1447 
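// Estimate the largest TLAB refill that can be satisfied without blocking:
// use the space left in the current allocation region if it can hold at
// least MinTLABSize, otherwise peek at the next non-humongous free region.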
size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
1449   size_t idx = _free_regions->current_index();
1450   ShenandoahHeapRegion* current = _free_regions->get_or_null(idx);
1451   if (current == NULL) {
1452     return 0;
1453   } else if (current->free() >= MinTLABSize) {
1454     // Current region has enough space left, can use it.
1455     return current->free();
1456   } else {
1457     // No more space in current region, peek next region
1458     return _free_regions->unsafe_peek_next_no_humongous();
1459   }
1460 }
1461 
1462 size_t ShenandoahHeap::max_tlab_size() const {
1463   return ShenandoahHeapRegion::region_size_bytes();
1464 }
1465 
1466 class ResizeGCLABClosure : public ThreadClosure {
1467 public:
1468   void do_thread(Thread* thread) {
1469     thread->gclab().resize();
1470   }
1471 };
1472 
1473 void ShenandoahHeap::resize_all_tlabs() {
1474   CollectedHeap::resize_all_tlabs();
1475 
1476   ResizeGCLABClosure cl;
1477   Threads::threads_do(&cl);
1478 }
1479 
1480 class AccumulateStatisticsGCLABClosure : public ThreadClosure {
1481 public:
1482   void do_thread(Thread* thread) {
1483     thread->gclab().accumulate_statistics();
1484     thread->gclab().initialize_statistics();
1485   }
1486 };
1487 
1488 void ShenandoahHeap::accumulate_statistics_all_gclabs() {
1489   AccumulateStatisticsGCLABClosure cl;
1490   Threads::threads_do(&cl);
1491 }
1492 
bool ShenandoahHeap::can_elide_tlab_store_barriers() const {
1494   return true;
1495 }
1496 
1497 oop ShenandoahHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) {
1498   // Overridden to do nothing.
1499   return new_obj;
1500 }
1501 
bool ShenandoahHeap::can_elide_initializing_store_barrier(oop new_obj) {
1503   return true;
1504 }
1505 
1506 bool ShenandoahHeap::card_mark_must_follow_store() const {
1507   return false;
1508 }
1509 
1510 void ShenandoahHeap::collect(GCCause::Cause cause) {
1511   assert(cause != GCCause::_gc_locker, "no JNI critical callback");
1512   if (GCCause::is_user_requested_gc(cause)) {
1513     if (! DisableExplicitGC) {
1514       _concurrent_gc_thread->do_full_gc(cause);
1515     }
1516   } else if (cause == GCCause::_allocation_failure) {
1517     collector_policy()->set_should_clear_all_soft_refs(true);
1518     _concurrent_gc_thread->do_full_gc(cause);
1519   }
1520 }
1521 
void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {
  // Intentionally a no-op: Shenandoah full collections are requested via
  // collect() and carried out by the concurrent GC thread.
}
1525 
AdaptiveSizePolicy* ShenandoahHeap::size_policy() {
  Unimplemented();
  return NULL;
}
1531 
1532 CollectorPolicy* ShenandoahHeap::collector_policy() const {
1533   return _shenandoah_policy;
1534 }

HeapWord* ShenandoahHeap::block_start(const void* addr) const {
1538   Space* sp = heap_region_containing(addr);
1539   if (sp != NULL) {
1540     return sp->block_start(addr);
1541   }
1542   return NULL;
1543 }
1544 
1545 size_t ShenandoahHeap::block_size(const HeapWord* addr) const {
1546   Space* sp = heap_region_containing(addr);
1547   assert(sp != NULL, "block_size of address outside of heap");
1548   return sp->block_size(addr);
1549 }
1550 
1551 bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
1552   Space* sp = heap_region_containing(addr);
1553   return sp->block_is_obj(addr);
1554 }
1555 
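// Time since the last GC is not tracked yet. Returning 0 makes callers see
// the last GC as having just happened.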
1556 jlong ShenandoahHeap::millis_since_last_gc() {
1557   return 0;
1558 }
1559 
1560 void ShenandoahHeap::prepare_for_verify() {
1561   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
1562     ensure_parsability(false);
1563   }
1564 }
1565 
1566 void ShenandoahHeap::print_gc_threads_on(outputStream* st) const {
1567   workers()->print_worker_threads_on(st);
1568 }
1569 
1570 void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
1571   workers()->threads_do(tcl);
1572 }
1573 
1574 void ShenandoahHeap::print_tracing_info() const {
1575   if (log_is_enabled(Info, gc, stats)) {
1576     ResourceMark rm;
1577     outputStream* out = Log(gc, stats)::info_stream();
1578     _shenandoah_policy->print_tracing_info(out);
1579   }
1580 }
1581 
1582 class ShenandoahVerifyRootsClosure: public ExtendedOopClosure {
1583 private:
1584   ShenandoahHeap*  _heap;
1585   VerifyOption     _vo;
1586   bool             _failures;
1587 public:
1588   // _vo == UsePrevMarking -> use "prev" marking information,
1589   // _vo == UseNextMarking -> use "next" marking information,
1590   // _vo == UseMarkWord    -> use mark word from object header.
1591   ShenandoahVerifyRootsClosure(VerifyOption vo) :
1592     _heap(ShenandoahHeap::heap()),
1593     _vo(vo),
1594     _failures(false) { }
1595 
1596   bool failures() { return _failures; }
1597 
1598 private:
1599   template <class T>
1600   inline void do_oop_work(T* p) {
    oop obj = oopDesc::load_decode_heap_oop(p);
    if (! oopDesc::is_null(obj) && ! obj->is_oop()) {
      // Just for debugging.
      tty->print_cr("Root location "PTR_FORMAT" points to broken oop "PTR_FORMAT,
                    p2i(p), p2i((void*) obj));
      _failures = true;
    }
    guarantee(obj->is_oop_or_null(), "is oop or null");
1610   }
1611 
1612 public:
1613   void do_oop(oop* p)       {
1614     do_oop_work(p);
1615   }
1616 
1617   void do_oop(narrowOop* p) {
1618     do_oop_work(p);
1619   }
1620 
1621 };
1622 
1623 class ShenandoahVerifyHeapClosure: public ObjectClosure {
1624 private:
1625   ShenandoahVerifyRootsClosure _rootsCl;
1626 public:
1627   ShenandoahVerifyHeapClosure(ShenandoahVerifyRootsClosure rc) :
    _rootsCl(rc) {}
1629 
1630   void do_object(oop p) {
1631     _rootsCl.do_oop(&p);
1632   }
1633 };
1634 
1635 void ShenandoahHeap::verify(VerifyOption vo) {
  if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
    ShenandoahVerifyRootsClosure rootsCl(vo);
1639 
1640     assert(Thread::current()->is_VM_thread(),
1641            "Expected to be executed serially by the VM thread at this point");
1642 
1643     roots_iterate(&rootsCl);
1644 
1645     bool failures = rootsCl.failures();
1646     log_trace(gc)("verify failures: %s", BOOL_TO_STR(failures));
1647 
1648     ShenandoahVerifyHeapClosure heapCl(rootsCl);
1649 
1650     object_iterate(&heapCl);
1651     // TODO: Implement rest of it.
1652   } else {
1653     tty->print("(SKIPPING roots, heapRegions, remset) ");
1654   }
}

size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
1657   return _free_regions->capacity();
1658 }
1659 
1660 class ShenandoahIterateObjectClosureRegionClosure: public ShenandoahHeapRegionClosure {
1661   ObjectClosure* _cl;
1662 public:
1663   ShenandoahIterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {}
1664   bool doHeapRegion(ShenandoahHeapRegion* r) {
1665     ShenandoahHeap::heap()->marked_object_iterate(r, _cl);
1666     return false;
1667   }
1668 };
1669 
1670 void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
1671   ShenandoahIterateObjectClosureRegionClosure blk(cl);
1672   heap_region_iterate(&blk, false, true);
1673 }
1674 
1675 class ShenandoahSafeObjectIterateAdjustPtrsClosure : public MetadataAwareOopClosure {
1676 private:
1677   ShenandoahHeap* _heap;
1678 
1679 public:
1680   ShenandoahSafeObjectIterateAdjustPtrsClosure() : _heap(ShenandoahHeap::heap()) {}
1681 
1682 private:
1683   template <class T>
1684   inline void do_oop_work(T* p) {
1685     T o = oopDesc::load_heap_oop(p);
1686     if (!oopDesc::is_null(o)) {
1687       oop obj = oopDesc::decode_heap_oop_not_null(o);
1688       oopDesc::encode_store_heap_oop(p, BrooksPointer::forwardee(obj));
1689     }
1690   }
1691 public:
1692   void do_oop(oop* p) {
1693     do_oop_work(p);
1694   }
1695   void do_oop(narrowOop* p) {
1696     do_oop_work(p);
1697   }
1698 };
1699 
1700 class ShenandoahSafeObjectIterateAndUpdate : public ObjectClosure {
1701 private:
1702   ObjectClosure* _cl;
1703 public:
1704   ShenandoahSafeObjectIterateAndUpdate(ObjectClosure *cl) : _cl(cl) {}
1705 
1706   virtual void do_object(oop obj) {
1707     assert (oopDesc::unsafe_equals(obj, BrooksPointer::forwardee(obj)),
1708             "avoid double-counting: only non-forwarded objects here");
1709 
1710     // Fix up the ptrs.
1711     ShenandoahSafeObjectIterateAdjustPtrsClosure adjust_ptrs;
1712     obj->oop_iterate(&adjust_ptrs);
1713 
    // Can report the object now:
1715     _cl->do_object(obj);
1716   }
1717 };
1718 
1719 void ShenandoahHeap::safe_object_iterate(ObjectClosure* cl) {
1720   assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1721 
  // Safe iteration visits only objects whose references are correct.
  // This is why we skip dirty regions, which hold stale copies of objects,
  // and fix up the pointers in the objects that are returned.
1725 
1726   ShenandoahSafeObjectIterateAndUpdate safe_cl(cl);
1727   ShenandoahIterateObjectClosureRegionClosure blk(&safe_cl);
1728   heap_region_iterate(&blk,
1729                       /* skip_dirty_regions = */ true,
1730                       /* skip_humongous_continuations = */ true);
1731 
1732   _need_update_refs = false; // already updated the references
1733 }
1734 
1735 // Apply blk->doHeapRegion() on all committed regions in address order,
1736 // terminating the iteration early if doHeapRegion() returns true.
1737 void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk, bool skip_dirty_regions, bool skip_humongous_continuation) const {
1738   for (size_t i = 0; i < _num_regions; i++) {
1739     ShenandoahHeapRegion* current  = _ordered_regions->get(i);
1740     if (skip_humongous_continuation && current->is_humongous_continuation()) {
1741       continue;
1742     }
1743     if (skip_dirty_regions && in_collection_set(current)) {
1744       continue;
1745     }
1746     if (blk->doHeapRegion(current)) {
1747       return;
1748     }
1749   }
1750 }
1751 
1752 class ClearLivenessClosure : public ShenandoahHeapRegionClosure {
1753   ShenandoahHeap* sh;
1754 public:
1755   ClearLivenessClosure(ShenandoahHeap* heap) : sh(heap) { }
1756 
1757   bool doHeapRegion(ShenandoahHeapRegion* r) {
1758     r->clear_live_data();
1759     sh->set_next_top_at_mark_start(r->bottom(), r->top());
1760     return false;
1761   }
1762 };
1763 
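// Initial-mark preparation: accumulate and reset TLAB statistics, retire
// TLABs so the heap is parsable, reset per-region liveness and the next
// top-at-mark-start values, clear the connection matrix if in use, and
// finally scan the root set to seed the concurrent marking queues.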
void ShenandoahHeap::start_concurrent_marking() {
  shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::accumulate_stats);
1767   accumulate_statistics_all_tlabs();
1768   shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::accumulate_stats);
1769 
1770   set_concurrent_mark_in_progress(true);
1771   // We need to reset all TLABs because we'd lose marks on all objects allocated in them.
1772   if (UseTLAB) {
1773     shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::make_parsable);
1774     ensure_parsability(true);
1775     shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::make_parsable);
1776   }
1777 
1778   _shenandoah_policy->record_bytes_allocated(_bytes_allocated_since_cm);
1779   _used_start_gc = used();
1780 
1781 #ifdef ASSERT
1782   if (ShenandoahDumpHeapBeforeConcurrentMark) {
1783     ensure_parsability(false);
1784     print_all_refs("pre-mark");
1785   }
1786 #endif
1787 
1788   shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::clear_liveness);
1789   ClearLivenessClosure clc(this);
1790   heap_region_iterate(&clc);
1791   shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::clear_liveness);
1792 
1793   if (UseShenandoahMatrix) {
1794     connection_matrix()->clear_all();
1795   }
1800   // Make above changes visible to worker threads
1801   OrderAccess::fence();
1802 
1803   shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::scan_roots);
1804   concurrentMark()->init_mark_roots();
1805   shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::scan_roots);
1808 }
1809 
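// Checks the post-evacuation invariant: an object is forwarded if and only
// if it lies in a collection-set (dirty/from-space) region, and its
// forwardee must be a valid oop outside the collection set.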
1810 class VerifyAfterEvacuationClosure : public ExtendedOopClosure {
1811 
1812   ShenandoahHeap* _sh;
1813 
1814 public:
1815   VerifyAfterEvacuationClosure() : _sh ( ShenandoahHeap::heap() ) {}
1816 
1817   template<class T> void do_oop_nv(T* p) {
1818     T heap_oop = oopDesc::load_heap_oop(p);
1819     if (!oopDesc::is_null(heap_oop)) {
1820       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
1821       guarantee(_sh->in_collection_set(obj) == (! oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj))),
                "forwarded objects can only exist in dirty (from-space) regions; is_dirty: %s, is_forwarded: %s, obj-klass: %s, marked: %s",
1823                 BOOL_TO_STR(_sh->in_collection_set(obj)),
1824                 BOOL_TO_STR(! oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj))),
1825                 obj->klass()->external_name(),
1826                 BOOL_TO_STR(_sh->is_marked_complete(obj))
1827                 );
1828       obj = oopDesc::bs()->read_barrier(obj);
1829       guarantee(! _sh->in_collection_set(obj), "forwarded oops must not point to dirty regions");
1830       guarantee(obj->is_oop(), "is_oop");
1831       guarantee(Metaspace::contains(obj->klass()), "klass pointer must go to metaspace");
1832     }
1833   }
1834 
1835   void do_oop(oop* p)       { do_oop_nv(p); }
1836   void do_oop(narrowOop* p) { do_oop_nv(p); }
1837 
1838 };
1839 
void ShenandoahHeap::verify_heap_after_evacuation() {
  verify_heap_size_consistency();
  ensure_parsability(false);

  VerifyAfterEvacuationClosure cl;
  roots_iterate(&cl);

  ObjectToOopClosure objs(&cl);
  object_iterate(&objs);
}
1853 
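// Marking has completed: the "next" bitmap and top-at-mark-start values now
// describe the finished marking, so exchange them with the "complete" ones
// that the rest of the cycle (evacuation, update-refs, verification) reads.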
1854 void ShenandoahHeap::swap_mark_bitmaps() {
1855   // Swap bitmaps.
1856   CMBitMap* tmp1 = _complete_mark_bit_map;
1857   _complete_mark_bit_map = _next_mark_bit_map;
1858   _next_mark_bit_map = tmp1;
1859 
1860   // Swap top-at-mark-start pointers
1861   HeapWord** tmp2 = _complete_top_at_mark_starts;
1862   _complete_top_at_mark_starts = _next_top_at_mark_starts;
1863   _next_top_at_mark_starts = tmp2;
1864 
1865   HeapWord** tmp3 = _complete_top_at_mark_starts_base;
1866   _complete_top_at_mark_starts_base = _next_top_at_mark_starts_base;
1867   _next_top_at_mark_starts_base = tmp3;
1868 }
1869 
1870 class VerifyReachableHeapClosure : public ExtendedOopClosure {
1871 private:
1872   SCMObjToScanQueue* _queue;
1873   ShenandoahHeap* _heap;
1874   CMBitMap* _map;
1875   bool _check_matrix;
1876   oop _obj;
1877 public:
1878   VerifyReachableHeapClosure(SCMObjToScanQueue* queue, CMBitMap* map, bool check_matrix) :
          _queue(queue), _heap(ShenandoahHeap::heap()), _map(map), _check_matrix(check_matrix) {}
1880   template <class T>
1881   void do_oop_work(T* p) {
1882     T o = oopDesc::load_heap_oop(p);
1883     if (!oopDesc::is_null(o)) {
1884       oop obj = oopDesc::decode_heap_oop_not_null(o);
1885       guarantee(check_obj_alignment(obj), "sanity");
1886 
1887       guarantee(!oopDesc::is_null(obj), "sanity");
1888       guarantee(_heap->is_in(obj), "sanity");
1889 
1890       oop forw = BrooksPointer::forwardee(obj);
1891       guarantee(!oopDesc::is_null(forw), "sanity");
1892       guarantee(_heap->is_in(forw), "sanity");
1893 
1894       guarantee(oopDesc::unsafe_equals(obj, forw), "should not be forwarded");
1895 
1896       if (_check_matrix) {
1897         size_t from_idx = _heap->heap_region_index_containing(p);
1898         size_t to_idx = _heap->heap_region_index_containing(obj);
1899         if (!_heap->connection_matrix()->is_connected(from_idx, to_idx)) {
1900           tty->print_cr("from-obj: ");
1901           _obj->print_on(tty);
1902           tty->print_cr("to-obj:");
1903           obj->print_on(tty);
1904           tty->print_cr("from-obj allocated after mark: %s", BOOL_TO_STR(_heap->allocated_after_complete_mark_start((HeapWord*) _obj)));
1905           tty->print_cr("to-obj allocated after mark: %s", BOOL_TO_STR(_heap->allocated_after_complete_mark_start((HeapWord*) obj)));
1906           tty->print_cr("from-obj marked: %s", BOOL_TO_STR(_heap->is_marked_complete(_obj)));
1907           tty->print_cr("to-obj marked: %s", BOOL_TO_STR(_heap->is_marked_complete(obj)));
1908           tty->print_cr("from-idx: " SIZE_FORMAT ", to-idx: " SIZE_FORMAT, from_idx, to_idx);
1909 
1910           oop fwd_from = BrooksPointer::forwardee(_obj);
1911           oop fwd_to = BrooksPointer::forwardee(obj);
1912           tty->print_cr("from-obj forwardee: " PTR_FORMAT, p2i(fwd_from));
1913           tty->print_cr("to-obj forwardee: " PTR_FORMAT, p2i(fwd_to));
1914           tty->print_cr("forward(from-obj) marked: %s", BOOL_TO_STR(_heap->is_marked_complete(fwd_from)));
1915           tty->print_cr("forward(to-obj) marked: %s", BOOL_TO_STR(_heap->is_marked_complete(fwd_to)));
1916           size_t fwd_from_idx = _heap->heap_region_index_containing(fwd_from);
1917           size_t fwd_to_idx = _heap->heap_region_index_containing(fwd_to);
1918           tty->print_cr("forward(from-idx): " SIZE_FORMAT ", forward(to-idx): " SIZE_FORMAT, fwd_from_idx, fwd_to_idx);
1919           tty->print_cr("forward(from) connected with forward(to)? %s", BOOL_TO_STR(_heap->connection_matrix()->is_connected(fwd_from_idx, fwd_to_idx)));
1920         }
        guarantee(oopDesc::unsafe_equals(ShenandoahBarrierSet::resolve_oop_static_not_null(obj), obj), "must not be forwarded");
1922         guarantee(_heap->connection_matrix()->is_connected(from_idx, to_idx), "must be connected");
1923       }
1924 
1925       if (_map->parMark((HeapWord*) obj)) {
1926         _queue->push(SCMTask(obj));
1927       }
1928     }
1929   }
1930 
1931   void do_oop(oop* p) { do_oop_work(p); }
1932   void do_oop(narrowOop* p) { do_oop_work(p); }
1933   void set_obj(oop o) { _obj = o; }
1934 };
1935 
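// Verification pass over the reachable heap: starting from the root set,
// trace all reachable objects on a single thread, using a temporary bitmap
// to mark visited objects and one SCMObjToScanQueue as the worklist. Along
// the way, check basic oop sanity, and region connectivity when the
// connection matrix is being verified.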
1936 void ShenandoahHeap::verify_heap_reachable_at_safepoint() {
1937   guarantee(SafepointSynchronize::is_at_safepoint(), "only when nothing else happens");
1938   guarantee(ShenandoahVerify || (UseShenandoahMatrix && VerifyShenandoahMatrix),
1939             "only when these are enabled, and bitmap is initialized in ShenandoahHeap::initialize");
1940 
1941   OrderAccess::fence();
1942   ensure_parsability(false);
1943 
  // Clear the temporary bitmap used to store the marking wavefront:
1945   MemRegion mr = MemRegion(_verification_bit_map.startWord(), _verification_bit_map.endWord());
1946   _verification_bit_map.clear_range_large(mr);
1947 
1948   // Initialize a single queue
1949   SCMObjToScanQueue* q = new SCMObjToScanQueue();
1950   q->initialize();
1951 
1952   // Scan root set
1953   ShenandoahRootProcessor rp(this, 1);
1954 
1955   {
1956     VerifyReachableHeapClosure cl(q, &_verification_bit_map, false);
1957     CLDToOopClosure cld_cl(&cl);
1958     CodeBlobToOopClosure code_cl(&cl, ! CodeBlobToOopClosure::FixRelocations);
1959     rp.process_all_roots(&cl, &cl, &cld_cl, &code_cl, 0);
1960   }
1961 
1962   // Finish the scan
1963   {
1964     VerifyReachableHeapClosure cl(q, &_verification_bit_map, UseShenandoahMatrix && VerifyShenandoahMatrix);
1965     SCMTask task;
1966     while ((q->pop_buffer(task) ||
1967             q->pop_local(task) ||
1968             q->pop_overflow(task))) {
1969       oop obj = task.obj();
1970       assert(!oopDesc::is_null(obj), "must not be null");
1971       cl.set_obj(obj);
1972       obj->oop_iterate(&cl);
1973     }
1974   }
1975 
  // Clean up.
  delete q;
1978 }
1979 
1980 void ShenandoahHeap::stop_concurrent_marking() {
1981   assert(concurrent_mark_in_progress(), "How else could we get here?");
  if (! cancelled_concgc()) {
    // Marking completed normally: the "next" marking data now holds the
    // complete marking, so swap the bitmaps. If marking was cancelled, the
    // swap is skipped and pending reference updates remain to be finished.
    set_need_update_refs(false);
    swap_mark_bitmaps();
  }
1988   set_concurrent_mark_in_progress(false);
1989 
1990   if (log_is_enabled(Trace, gc, region)) {
1991     ResourceMark rm;
1992     outputStream* out = Log(gc, region)::trace_stream();
1993     print_heap_regions(out);
1994   }
}
1997 
1998 void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
1999   _concurrent_mark_in_progress = in_progress ? 1 : 0;
2000   JavaThread::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
2001 }
2002 
2003 void ShenandoahHeap::set_evacuation_in_progress_concurrently(bool in_progress) {
2004   // Note: it is important to first release the _evacuation_in_progress flag here,
2005   // so that Java threads can get out of oom_during_evacuation() and reach a safepoint,
2006   // in case a VM task is pending.
2007   set_evacuation_in_progress(in_progress);
2008   MutexLocker mu(Threads_lock);
2009   JavaThread::set_evacuation_in_progress_all_threads(in_progress);
2010 }
2011 
2012 void ShenandoahHeap::set_evacuation_in_progress_at_safepoint(bool in_progress) {
2013   assert(SafepointSynchronize::is_at_safepoint(), "Only call this at safepoint");
2014   set_evacuation_in_progress(in_progress);
2015   JavaThread::set_evacuation_in_progress_all_threads(in_progress);
2016 }
2017 
2018 void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
2019   _evacuation_in_progress = in_progress ? 1 : 0;
2020   OrderAccess::fence();
2021 }
2022 
void ShenandoahHeap::verify_copy(oop p, oop c) {
  assert(! oopDesc::unsafe_equals(p, oopDesc::bs()->read_barrier(p)), "forwarded correctly");
  assert(oopDesc::unsafe_equals(oopDesc::bs()->read_barrier(p), c), "verify pointer is correct");
  if (p->klass() != c->klass()) {
    print_heap_regions();
  }
  assert(p->klass() == c->klass(), "verify class p-size: "INT32_FORMAT" c-size: "INT32_FORMAT, p->size(), c->size());
  assert(p->size() == c->size(), "verify size");
  // Object may have been locked between copy and verification.
  //    assert(p->mark() == c->mark(), "verify mark");
  assert(oopDesc::unsafe_equals(c, oopDesc::bs()->read_barrier(c)), "verify only forwarded once");
}
2035 
2036 void ShenandoahHeap::oom_during_evacuation() {
  log_develop_trace(gc)("Out of memory during evacuation; cancelling evacuation and scheduling a full GC (thread %d)",
2038                         Thread::current()->osthread()->thread_id());
2039 
2040   // We ran out of memory during evacuation. Cancel evacuation, and schedule a full-GC.
2041   collector_policy()->set_should_clear_all_soft_refs(true);
2042   concurrent_thread()->try_set_full_gc();
2043   cancel_concgc(_oom_evacuation);
2044 
2045   if ((! Thread::current()->is_GC_task_thread()) && (! Thread::current()->is_ConcurrentGC_thread())) {
2046     assert(! Threads_lock->owned_by_self()
2047            || SafepointSynchronize::is_at_safepoint(), "must not hold Threads_lock here");
2048     log_warning(gc)("OOM during evacuation. Let Java thread wait until evacuation finishes.");
2049     while (_evacuation_in_progress) { // wait.
2050       Thread::current()->_ParkEvent->park(1);
2051     }
2052   }
}
2055 
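// Raw TLAB allocations reserve one extra word in front of each object for
// the Brooks forwarding pointer. Given the raw allocation at 'obj', the
// object itself starts one fwd-pointer word higher, and its forwarding
// pointer is initialized to refer to the object itself:
//
//   obj -> | fwd ptr | object header | fields ... |
//                    ^-- returned result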
2056 HeapWord* ShenandoahHeap::tlab_post_allocation_setup(HeapWord* obj) {
2057   // Initialize Brooks pointer for the next object
2058   HeapWord* result = obj + BrooksPointer::word_size();
2059   BrooksPointer::initialize(oop(result));
2060   return result;
2061 }
2062 
2063 uint ShenandoahHeap::oop_extra_words() {
2064   return BrooksPointer::word_size();
2065 }
2066 
2067 void ShenandoahHeap::grow_heap_by(size_t num_regions) {
2068   size_t old_num_regions = _num_regions;
2069   ensure_new_regions(num_regions);
2070   for (size_t i = 0; i < num_regions; i++) {
2071     size_t new_region_index = i + old_num_regions;
2072     HeapWord* start = ((HeapWord*) base()) + (ShenandoahHeapRegion::region_size_bytes() / HeapWordSize) * new_region_index;
2073     ShenandoahHeapRegion* new_region = new ShenandoahHeapRegion(this, start, ShenandoahHeapRegion::region_size_bytes() / HeapWordSize, new_region_index);
2074 
2075     if (log_is_enabled(Trace, gc, region)) {
2076       ResourceMark rm;
2077       outputStream* out = Log(gc, region)::trace_stream();
2078       out->print_cr("allocating new region at index: "SIZE_FORMAT, new_region_index);
2079       new_region->print_on(out);
2080     }
2081 
2082     assert(_ordered_regions->active_regions() == new_region->region_number(), "must match");
2083     _ordered_regions->add_region(new_region);
2084     _in_cset_fast_test_base[new_region_index] = false; // Not in cset
2085     _next_top_at_mark_starts_base[new_region_index] = new_region->bottom();
2086     _complete_top_at_mark_starts_base[new_region_index] = new_region->bottom();
2087 
2088     _free_regions->add_region(new_region);
2089   }
2090 }
2091 
void ShenandoahHeap::ensure_new_regions(size_t new_regions) {
  size_t num_regions = _num_regions;
2095   size_t new_num_regions = num_regions + new_regions;
2096   assert(new_num_regions <= _max_regions, "we checked this earlier");
2097 
2098   size_t expand_size = new_regions * ShenandoahHeapRegion::region_size_bytes();
2099   log_trace(gc, region)("expanding storage by "SIZE_FORMAT_HEX" bytes, for "SIZE_FORMAT" new regions", expand_size, new_regions);
2100   bool success = _storage.expand_by(expand_size, ShenandoahAlwaysPreTouch);
2101   assert(success, "should always be able to expand by requested size");
2102 
  _num_regions = new_num_regions;
}
2106 
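// Liveness predicate handed to reference processing and class unloading:
// resolve the object through its forwarding pointer first, then consult
// the "next" mark bitmap.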
2107 ShenandoahForwardedIsAliveClosure::ShenandoahForwardedIsAliveClosure() :
2108   _heap(ShenandoahHeap::heap_no_check()) {
2109 }
2110 
2111 void ShenandoahForwardedIsAliveClosure::init(ShenandoahHeap* heap) {
2112   _heap = heap;
2113 }
2114 
bool ShenandoahForwardedIsAliveClosure::do_object_b(oop obj) {
  assert(_heap != NULL, "sanity");
2118   obj = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
2119 #ifdef ASSERT
2120   if (_heap->concurrent_mark_in_progress()) {
2121     assert(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)), "only query to-space");
2122   }
2123 #endif
2124   assert(!oopDesc::is_null(obj), "null");
2125   return _heap->is_marked_next(obj);
2126 }
2127 
2128 void ShenandoahHeap::ref_processing_init() {
2129   MemRegion mr = reserved_region();
2130 
2131   isAlive.init(ShenandoahHeap::heap());
2132   assert(_max_workers > 0, "Sanity");
2133 
  _ref_processor =
    new ReferenceProcessor(mr,                      // span
                           ParallelRefProcEnabled,  // mt processing
                           _max_workers,            // degree of mt processing
                           true,                    // mt discovery
                           _max_workers,            // degree of mt discovery
                           false,                   // reference discovery is not atomic
                           &isAlive);               // is-alive (liveness) closure
2147 }
2148 
2149 size_t ShenandoahHeap::num_regions() {
2150   return _num_regions;
2151 }
2152 
2153 size_t ShenandoahHeap::max_regions() {
2154   return _max_regions;
2155 }
2156 
2157 GCTracer* ShenandoahHeap::tracer() {
2158   return shenandoahPolicy()->tracer();
2159 }
2160 
2161 size_t ShenandoahHeap::tlab_used(Thread* thread) const {
2162   return _free_regions->used();
2163 }
2164 
2165 void ShenandoahHeap::cancel_concgc(GCCause::Cause cause) {
2166   if (try_cancel_concgc()) {
2167     log_info(gc)("Cancelling concurrent GC: %s", GCCause::to_string(cause));
2168     _shenandoah_policy->report_concgc_cancelled();
2169   }
2170 }
2171 
2172 void ShenandoahHeap::cancel_concgc(ShenandoahCancelCause cause) {
2173   if (try_cancel_concgc()) {
2174     log_info(gc)("Cancelling concurrent GC: %s", cancel_cause_to_string(cause));
2175     _shenandoah_policy->report_concgc_cancelled();
2176   }
2177 }
2178 
2179 const char* ShenandoahHeap::cancel_cause_to_string(ShenandoahCancelCause cause) {
2180   switch (cause) {
2181     case _oom_evacuation:
2182       return "Out of memory for evacuation";
2183     case _vm_stop:
2184       return "Stopping VM";
2185     default:
2186       return "Unknown";
2187   }
2188 }
2189 
2190 uint ShenandoahHeap::max_workers() {
2191   return _max_workers;
2192 }
2193 
2194 void ShenandoahHeap::stop() {
2195   // The shutdown sequence should be able to terminate when GC is running.
2196 
2197   // Step 1. Notify control thread that we are in shutdown.
2198   // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown.
2199   // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
2200   _concurrent_gc_thread->prepare_for_graceful_shutdown();
2201 
2202   // Step 2. Notify GC workers that we are cancelling GC.
2203   cancel_concgc(_vm_stop);
2204 
2205   // Step 3. Wait until GC worker exits normally.
2206   _concurrent_gc_thread->stop();
2207 }
2208 
2209 void ShenandoahHeap::unload_classes_and_cleanup_tables() {
2210   ShenandoahForwardedIsAliveClosure is_alive;
2211   // Unload classes and purge SystemDictionary.
2212   bool purged_class = SystemDictionary::do_unloading(&is_alive, true);
2213   ParallelCleaningTask unlink_task(&is_alive, true, true, _workers->active_workers(), purged_class);
2214   _workers->run_task(&unlink_task);
2215   ClassLoaderDataGraph::purge();
2216 }
2217 
2218 void ShenandoahHeap::set_need_update_refs(bool need_update_refs) {
2219   _need_update_refs = need_update_refs;
2220 }
2221 
// FIXME: this belongs in the heap region set code.
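// Return the next region after 'r' in region-number order, skipping
// humongous regions.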
2223 ShenandoahHeapRegion* ShenandoahHeap::next_compaction_region(const ShenandoahHeapRegion* r) {
2224   size_t region_idx = r->region_number() + 1;
2225   ShenandoahHeapRegion* next = _ordered_regions->get(region_idx);
2226   guarantee(next->region_number() == region_idx, "region number must match");
2227   while (next->is_humongous()) {
2228     region_idx = next->region_number() + 1;
2229     next = _ordered_regions->get(region_idx);
2230     guarantee(next->region_number() == region_idx, "region number must match");
2231   }
2232   return next;
2233 }
2234 
2235 void ShenandoahHeap::set_region_in_collection_set(size_t region_index, bool b) {
2236   _in_cset_fast_test_base[region_index] = b;
2237 }
2238 
2239 ShenandoahMonitoringSupport* ShenandoahHeap::monitoring_support() {
2240   return _monitoring_support;
2241 }
2242 
2243 CMBitMap* ShenandoahHeap::complete_mark_bit_map() {
2244   return _complete_mark_bit_map;
2245 }
2246 
2247 CMBitMap* ShenandoahHeap::next_mark_bit_map() {
2248   return _next_mark_bit_map;
2249 }
2250 
2251 void ShenandoahHeap::add_free_region(ShenandoahHeapRegion* r) {
2252   _free_regions->add_region(r);
2253 }
2254 
2255 void ShenandoahHeap::clear_free_regions() {
2256   _free_regions->clear();
2257 }
2258 
2259 address ShenandoahHeap::in_cset_fast_test_addr() {
2260   return (address) (ShenandoahHeap::heap()->_in_cset_fast_test);
2261 }
2262 
2263 address ShenandoahHeap::cancelled_concgc_addr() {
2264   return (address) &(ShenandoahHeap::heap()->_cancelled_concgc);
2265 }
2266 
2267 void ShenandoahHeap::clear_cset_fast_test() {
2268   assert(_in_cset_fast_test_base != NULL, "sanity");
  memset(_in_cset_fast_test_base, 0,
         _in_cset_fast_test_length * sizeof(bool));
2271 }
2272 
2273 size_t ShenandoahHeap::conservative_max_heap_alignment() {
2274   return ShenandoahMaxRegionSize;
2275 }
2276 
2277 size_t ShenandoahHeap::bytes_allocated_since_cm() {
2278   return _bytes_allocated_since_cm;
2279 }
2280 
2281 void ShenandoahHeap::set_bytes_allocated_since_cm(size_t bytes) {
2282   _bytes_allocated_since_cm = bytes;
2283 }
2284 
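// Top-at-mark-start (TAMS) accessors. The per-region slot is found by
// shifting the region's base address by the region size shift; objects
// allocated above the recorded TAMS are treated as implicitly live by
// the corresponding (next/complete) marking.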
2285 void ShenandoahHeap::set_next_top_at_mark_start(HeapWord* region_base, HeapWord* addr) {
2286   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_shift();
2287   _next_top_at_mark_starts[index] = addr;
2288 }
2289 
2290 HeapWord* ShenandoahHeap::next_top_at_mark_start(HeapWord* region_base) {
2291   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_shift();
2292   return _next_top_at_mark_starts[index];
2293 }
2294 
2295 void ShenandoahHeap::set_complete_top_at_mark_start(HeapWord* region_base, HeapWord* addr) {
2296   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_shift();
2297   _complete_top_at_mark_starts[index] = addr;
2298 }
2299 
2300 HeapWord* ShenandoahHeap::complete_top_at_mark_start(HeapWord* region_base) {
2301   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_shift();
2302   return _complete_top_at_mark_starts[index];
2303 }
2304 
2305 void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
2306   _full_gc_in_progress = in_progress;
2307 }
2308 
2309 bool ShenandoahHeap::is_full_gc_in_progress() const {
2310   return _full_gc_in_progress;
2311 }
2312 
2313 void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
2314   _update_refs_in_progress = in_progress;
2315 }
2316 
2317 bool ShenandoahHeap::is_update_refs_in_progress() const {
2318   return _update_refs_in_progress;
2319 }
2320 
2321 class NMethodOopInitializer : public OopClosure {
2322 private:
2323   ShenandoahHeap* _heap;
2324 public:
2325   NMethodOopInitializer() : _heap(ShenandoahHeap::heap()) {
2326   }
2327 
2328 private:
2329   template <class T>
2330   inline void do_oop_work(T* p) {
2331     T o = oopDesc::load_heap_oop(p);
2332     if (! oopDesc::is_null(o)) {
2333       oop obj1 = oopDesc::decode_heap_oop_not_null(o);
2334       oop obj2 = oopDesc::bs()->write_barrier(obj1);
2335       if (! oopDesc::unsafe_equals(obj1, obj2)) {
2336         oopDesc::encode_store_heap_oop(p, obj2);
2337       }
2338     }
2339   }
2340 
2341 public:
2342   void do_oop(oop* o) {
2343     do_oop_work(o);
2344   }
2345   void do_oop(narrowOop* o) {
2346     do_oop_work(o);
2347   }
2348 };
2349 
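// On nmethod registration, resolve every oop embedded in the compiled code
// to its to-space copy via the write barrier, so the nmethod never holds
// stale from-space references, and then fix up the oop relocations.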
2350 void ShenandoahHeap::register_nmethod(nmethod* nm) {
2351   NMethodOopInitializer init;
2352   nm->oops_do(&init);
2353   nm->fix_oop_relocations();
2354 }
2355 
void ShenandoahHeap::unregister_nmethod(nmethod* nm) {
  // Nothing to do.
}
2358 
2359 void ShenandoahHeap::pin_object(oop o) {
2360   heap_region_containing(o)->pin();
2361 }
2362 
2363 void ShenandoahHeap::unpin_object(oop o) {
2364   heap_region_containing(o)->unpin();
2365 }

GCTimer* ShenandoahHeap::gc_timer() const {
2369   return _gc_timer;
2370 }
2371 
2372 class ShenandoahCountGarbageClosure : public ShenandoahHeapRegionClosure {
2373 private:
2374   size_t _garbage;
2375 public:
2376   ShenandoahCountGarbageClosure() : _garbage(0) {
2377   }
2378 
2379   bool doHeapRegion(ShenandoahHeapRegion* r) {
2380     if (! r->is_humongous() && ! r->is_pinned() && ! r->in_collection_set()) {
2381       _garbage += r->garbage();
2382     }
2383     return false;
2384   }
2385 
2386   size_t garbage() {
2387     return _garbage;
2388   }
2389 };
2390 
2391 size_t ShenandoahHeap::garbage() {
2392   ShenandoahCountGarbageClosure cl;
2393   heap_region_iterate(&cl);
2394   return cl.garbage();
2395 }
2396 
2397 ShenandoahConnectionMatrix* ShenandoahHeap::connection_matrix() {
2398   return _connection_matrix;
2399 }
2400 
2401 ShenandoahPartialGC* ShenandoahHeap::partial_gc() {
2402   return _partial_gc;
2403 }
2404 
2405 void ShenandoahHeap::do_partial_collection() {
2406   partial_gc()->do_partial_collection();
2407 }
2408 
2409 ShenandoahUpdateHeapRefsSuperClosure::ShenandoahUpdateHeapRefsSuperClosure() :
2410   _heap(ShenandoahHeap::heap()) {}
2411 
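// Parallel update-refs task: workers claim regions one by one. Live objects
// in regions outside the collection set get their references updated to
// point into to-space; for collection-set regions (holding only stale
// from-space copies) the relevant range of the complete mark bitmap is
// cleared instead. The task bails out early if the GC has been cancelled.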
2412 template<class T>
2413 class ShenandoahUpdateHeapRefsTask : public AbstractGangTask {
2414 private:
2415   T cl;
2416   ShenandoahHeap* _heap;
2417   ShenandoahHeapRegionSet* _regions;
2418 
2419 public:
2420   ShenandoahUpdateHeapRefsTask(ShenandoahHeapRegionSet* regions) :
2421     AbstractGangTask("Concurrent Update References Task"),
2422     cl(T()),
2423     _heap(ShenandoahHeap::heap()),
2424     _regions(regions) {
2425   }
2426 
2427   void work(uint worker_id) {
2428     ShenandoahHeapRegion* r = _regions->claim_next();
2429     while (r != NULL) {
2430       if (! _heap->in_collection_set(r) &&
2431           ! r->is_empty()) {
2432         _heap->marked_object_oop_safe_iterate(r, &cl);
2433       } else if (_heap->in_collection_set(r)) {
2434         HeapWord* bottom = r->bottom();
2435         HeapWord* top = _heap->complete_top_at_mark_start(r->bottom());
2436         if (top > bottom) {
2437           _heap->complete_mark_bit_map()->clear_range_large(MemRegion(bottom, top));
2438         }
2439       }
2440       if (_heap->cancelled_concgc()) {
2441         return;
2442       }
2443       r = _regions->claim_next();
2444     }
2445   }
2446 };
2447 
2448 void ShenandoahHeap::update_heap_references(ShenandoahHeapRegionSet* update_regions) {
2449   if (UseShenandoahMatrix) {
2450     ShenandoahUpdateHeapRefsTask<ShenandoahUpdateHeapRefsMatrixClosure> task(update_regions);
2451     workers()->run_task(&task);
2452   } else {
2453     ShenandoahUpdateHeapRefsTask<ShenandoahUpdateHeapRefsClosure> task(update_regions);
2454     workers()->run_task(&task);
2455   }
2456 }
2457 
2458 void ShenandoahHeap::concurrent_update_heap_references() {
2459   _shenandoah_policy->record_phase_start(ShenandoahCollectorPolicy::conc_update_refs);
2460   ShenandoahHeapRegionSet* update_regions = regions();
2461   update_regions->clear_current_index();
2462   update_heap_references(update_regions);
2463   _shenandoah_policy->record_phase_end(ShenandoahCollectorPolicy::conc_update_refs);
2464 }
2465 
2466 void ShenandoahHeap::prepare_update_refs() {
2467   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
2468   set_evacuation_in_progress_at_safepoint(false);
2469   set_update_refs_in_progress(true);
2470   ensure_parsability(true);
2471   if (UseShenandoahMatrix) {
2472     connection_matrix()->clear_all();
2473   }
  for (size_t i = 0; i < _num_regions; i++) {
2475     ShenandoahHeapRegion* r = _ordered_regions->get(i);
2476     r->set_concurrent_iteration_safe_limit(r->top());
2477   }
2478 }
2479 
2480 void ShenandoahHeap::finish_update_refs() {
2481   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
2482 
2483   if (cancelled_concgc()) {
2484     // Finish updating references where we left off.
2485     clear_cancelled_concgc();
2486     ShenandoahHeapRegionSet* update_regions = regions();
2487     update_heap_references(update_regions);
2488   }
2489 
2490   assert(! cancelled_concgc(), "Should have been done right before");
2491   concurrentMark()->update_roots(ShenandoahCollectorPolicy::final_update_refs_roots);
2492 
2493   // Allocations might have happened before we STWed here, record peak:
2494   shenandoahPolicy()->record_peak_occupancy();
2495 
2496   recycle_dirty_regions();
2497   set_need_update_refs(false);
2498 
2499   if (ShenandoahVerify) {
2500     verify_update_refs();
2501   }
2502 
2503   {
2504     // Rebuild the free set
2505     ShenandoahHeapLock hl(this);
2506     _free_regions->clear();
2507     size_t end = _ordered_regions->active_regions();
2508     for (size_t i = 0; i < end; i++) {
2509       ShenandoahHeapRegion* r = _ordered_regions->get(i);
2510       if (!r->is_humongous()) {
2511         assert (!in_collection_set(r), "collection set should be clear");
2512         _free_regions->add_region(r);
2513       }
2514     }
2515   }
2516   set_update_refs_in_progress(false);
2517 }
2518 
2519 class ShenandoahVerifyUpdateRefsClosure : public ExtendedOopClosure {
2520 private:
2521   template <class T>
2522   void do_oop_work(T* p) {
2523     T o = oopDesc::load_heap_oop(p);
2524     if (! oopDesc::is_null(o)) {
2525       oop obj = oopDesc::decode_heap_oop_not_null(o);
2526       guarantee(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)),
2527                 "must not be forwarded");
2528     }
2529   }
2530 public:
2531   void do_oop(oop* p) { do_oop_work(p); }
2532   void do_oop(narrowOop* p) { do_oop_work(p); }
2533 };
2534 
void ShenandoahHeap::verify_update_refs() {
  ensure_parsability(false);
2538 
2539   ShenandoahVerifyUpdateRefsClosure cl;
2540 
2541   // Verify roots.
2542   {
2543     CodeBlobToOopClosure blobsCl(&cl, false);
2544     CLDToOopClosure cldCl(&cl);
2545     ShenandoahRootProcessor rp(this, 1);
2546     rp.process_all_roots(&cl, &cl, &cldCl, &blobsCl, 0);
2547   }
2548 
2549   // Verify heap.
  for (size_t i = 0; i < num_regions(); i++) {
2551     ShenandoahHeapRegion* r = regions()->get(i);
2552     marked_object_oop_iterate(r, &cl);
2553   }
2554 }
2555 
2556 #ifdef ASSERT
2557 void ShenandoahHeap::assert_heaplock_owned_by_current_thread() {
2558   assert(_heap_lock == locked, "must be locked");
2559   assert(_heap_lock_owner == Thread::current(), "must be owned by current thread");
2560 }
2561 
2562 void ShenandoahHeap::assert_heaplock_or_safepoint() {
2563   Thread* thr = Thread::current();
  assert((_heap_lock == locked && _heap_lock_owner == thr) ||
         (SafepointSynchronize::is_at_safepoint() && thr->is_VM_thread()),
         "must own the heap lock, or be the VM thread at a safepoint");
2567 }
2568 
2569 #endif
2570 
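// Deferred recycling: under the heap lock, a batch of regions is recycled
// with start_deferred_recycling() / defer_recycle() / finish_deferred_recycle().
// Batching allows the connection matrix rows of all recycled regions to be
// cleared in a single call when the matrix is enabled.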
2571 void ShenandoahHeap::start_deferred_recycling() {
2572   assert_heaplock_owned_by_current_thread();
2573   _recycled_region_count = 0;
2574 }
2575 
2576 void ShenandoahHeap::defer_recycle(ShenandoahHeapRegion* r) {
2577   assert_heaplock_owned_by_current_thread();
2578   _recycled_regions[_recycled_region_count++] = r->region_number();
2579 }
2580 
2581 void ShenandoahHeap::finish_deferred_recycle() {
2582   assert_heaplock_owned_by_current_thread();
2583   if (UseShenandoahMatrix) {
2584     for (size_t i = 0; i < _recycled_region_count; i++) {
2585       regions()->get(_recycled_regions[i])->recycle_no_matrix();
2586     }
2587     connection_matrix()->clear_batched(_recycled_regions, _recycled_region_count);
2588   } else {
2589     for (size_t i = 0; i < _recycled_region_count; i++) {
2590       regions()->get(_recycled_regions[i])->recycle();
2591     }
2592   }
2593 }