1 /*
   2  * Copyright (c) 2013, 2017, Red Hat, Inc. and/or its affiliates.
   3  *
   4  * This code is free software; you can redistribute it and/or modify it
   5  * under the terms of the GNU General Public License version 2 only, as
   6  * published by the Free Software Foundation.
   7  *
   8  * This code is distributed in the hope that it will be useful, but WITHOUT
   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "precompiled.hpp"
  25 #include "memory/allocation.hpp"
  26 
  27 #include "gc/shared/gcTimer.hpp"
  28 #include "gc/shared/gcTraceTime.inline.hpp"
  29 #include "gc/shared/parallelCleaning.hpp"
  30 
  31 #include "gc/shenandoah/brooksPointer.hpp"
  32 #include "gc/shenandoah/shenandoahAllocTracker.hpp"
  33 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  34 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  35 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  36 #include "gc/shenandoah/shenandoahConcurrentMark.hpp"
  37 #include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
  38 #include "gc/shenandoah/shenandoahConcurrentThread.hpp"
  39 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  40 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  41 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  42 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
  43 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  44 #include "gc/shenandoah/shenandoahMarkCompact.hpp"
  45 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  46 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  47 #include "gc/shenandoah/shenandoahPartialGC.hpp"
  48 #include "gc/shenandoah/shenandoahRootProcessor.hpp"
  49 #include "gc/shenandoah/shenandoahStringDedup.hpp"
  50 #include "gc/shenandoah/shenandoahUtils.hpp"
  51 #include "gc/shenandoah/shenandoahVerifier.hpp"
  52 #include "gc/shenandoah/shenandoahCodeRoots.hpp"
  53 #include "gc/shenandoah/vm_operations_shenandoah.hpp"
  54 
  55 #include "runtime/vmThread.hpp"
  56 #include "services/mallocTracker.hpp"
  57 
  58 ShenandoahUpdateRefsClosure::ShenandoahUpdateRefsClosure() : _heap(ShenandoahHeap::heap()) {}
  59 
  60 #ifdef ASSERT
  61 template <class T>
  62 void ShenandoahAssertToSpaceClosure::do_oop_nv(T* p) {
  63   T o = oopDesc::load_heap_oop(p);
  64   if (! oopDesc::is_null(o)) {
  65     oop obj = oopDesc::decode_heap_oop_not_null(o);
  66     assert(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)),
           "need to-space object here obj: " PTR_FORMAT ", rb(obj): " PTR_FORMAT ", p: " PTR_FORMAT,
  68            p2i(obj), p2i(ShenandoahBarrierSet::resolve_oop_static_not_null(obj)), p2i(p));
  69   }
  70 }
  71 
  72 void ShenandoahAssertToSpaceClosure::do_oop(narrowOop* p) { do_oop_nv(p); }
  73 void ShenandoahAssertToSpaceClosure::do_oop(oop* p)       { do_oop_nv(p); }
  74 #endif
  75 
  76 const char* ShenandoahHeap::name() const {
  77   return "Shenandoah";
  78 }
  79 
  80 class ShenandoahPretouchTask : public AbstractGangTask {
  81 private:
  82   ShenandoahHeapRegionSet* _regions;
  83   const size_t _bitmap_size;
  84   const size_t _page_size;
  85   char* _bitmap_base;
  86 public:
  87   ShenandoahPretouchTask(ShenandoahHeapRegionSet* regions,
  88                          char* bitmap_base, size_t bitmap_size,
  89                          size_t page_size) :
  90     AbstractGangTask("Shenandoah PreTouch",
  91                      Universe::is_fully_initialized() ? GCId::current_raw() :
  92                                                         // During VM initialization there is
  93                                                         // no GC cycle that this task can be
  94                                                         // associated with.
  95                                                         GCId::undefined()),
    _regions(regions),
    _bitmap_size(bitmap_size),
    _page_size(page_size),
    _bitmap_base(bitmap_base) {
    _regions->clear_current_index();
  }
 102 
 103   virtual void work(uint worker_id) {
 104     ShenandoahHeapRegion* r = _regions->claim_next();
 105     while (r != NULL) {
 106       log_trace(gc, heap)("Pretouch region " SIZE_FORMAT ": " PTR_FORMAT " -> " PTR_FORMAT,
 107                           r->region_number(), p2i(r->bottom()), p2i(r->end()));
 108       os::pretouch_memory(r->bottom(), r->end(), _page_size);
 109 
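      // Also pretouch the slice of the marking bitmap that covers this region.
      // MarkBitMap::heap_map_factor() is the number of heap bytes covered by one bitmap byte,
      // so the region's slice spans region_size_bytes / heap_map_factor bytes of the bitmap.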
 110       size_t start = r->region_number()       * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
 111       size_t end   = (r->region_number() + 1) * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
 112       assert (end <= _bitmap_size, "end is sane: " SIZE_FORMAT " < " SIZE_FORMAT, end, _bitmap_size);
 113 
 114       log_trace(gc, heap)("Pretouch bitmap under region " SIZE_FORMAT ": " PTR_FORMAT " -> " PTR_FORMAT,
 115                           r->region_number(), p2i(_bitmap_base + start), p2i(_bitmap_base + end));
 116       os::pretouch_memory(_bitmap_base + start, _bitmap_base + end, _page_size);
 117 
 118       r = _regions->claim_next();
 119     }
 120   }
 121 };
 122 
 123 jint ShenandoahHeap::initialize() {
 124   CollectedHeap::pre_initialize();
 125 
 126   BrooksPointer::initial_checks();
 127 
 128   size_t init_byte_size = collector_policy()->initial_heap_byte_size();
 129   size_t max_byte_size = collector_policy()->max_heap_byte_size();
 130   size_t heap_alignment = collector_policy()->heap_alignment();
 131 
 132   if (ShenandoahAlwaysPreTouch) {
    // When pre-touch is enabled, the entire heap is committed right away.
 134     init_byte_size = max_byte_size;
 135   }
 136 
 137   Universe::check_alignment(max_byte_size,
 138                             ShenandoahHeapRegion::region_size_bytes(),
 139                             "shenandoah heap");
 140   Universe::check_alignment(init_byte_size,
 141                             ShenandoahHeapRegion::region_size_bytes(),
 142                             "shenandoah heap");
 143 
 144   ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
 145                                                  heap_alignment);
 146   initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*) (heap_rs.base() + heap_rs.size()));
 147 
 148   set_barrier_set(new ShenandoahBarrierSet(this));
 149   ReservedSpace pgc_rs = heap_rs.first_part(max_byte_size);
 150 
 151   _num_regions = max_byte_size / ShenandoahHeapRegion::region_size_bytes();
 152   size_t num_committed_regions = init_byte_size / ShenandoahHeapRegion::region_size_bytes();
 153   _initial_size = num_committed_regions * ShenandoahHeapRegion::region_size_bytes();
 154   _committed = _initial_size;
 155 
 156   log_info(gc, heap)("Initialize Shenandoah heap with initial size " SIZE_FORMAT " bytes", init_byte_size);
 157   if (!os::commit_memory(pgc_rs.base(), _initial_size, false)) {
 158     vm_exit_out_of_memory(_initial_size, OOM_MMAP_ERROR, "Shenandoah failed to initialize heap");
 159   }
 160 
 161   size_t reg_size_words = ShenandoahHeapRegion::region_size_words();
 162 
 163   _ordered_regions = new ShenandoahHeapRegionSet(_num_regions);
 164   _free_regions = new ShenandoahFreeSet(_ordered_regions, _num_regions);
 165 
 166   _collection_set = new ShenandoahCollectionSet(this, (HeapWord*)pgc_rs.base());
 167 
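  // Biased TAMS array: _top_at_mark_starts is offset so that indexing it with
  // (heap address >> region_size_bytes_shift) yields the entry for the region containing that address.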
 168   _top_at_mark_starts_base = NEW_C_HEAP_ARRAY(HeapWord*, _num_regions, mtGC);
 169   _top_at_mark_starts = _top_at_mark_starts_base -
 170                ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::region_size_bytes_shift());
 171 
 172 
 173   {
 174     ShenandoahHeapLocker locker(lock());
 175     for (size_t i = 0; i < _num_regions; i++) {
 176       ShenandoahHeapRegion* r = new ShenandoahHeapRegion(this,
 177                                                          (HeapWord*) pgc_rs.base() + reg_size_words * i,
 178                                                          reg_size_words,
 179                                                          i,
 180                                                          i < num_committed_regions);
 181 
 182       _top_at_mark_starts_base[i] = r->bottom();
 183 
      // Add to the ordered regions first.
      // The active size of the ordered region set serves as the number of active regions in the heap;
      // the free set and the collection set use that number to assert the correctness of incoming regions.
 187       _ordered_regions->add_region(r);
 188       _free_regions->add_region(r);
 189       assert(!collection_set()->is_in(i), "New region should not be in collection set");
 190     }
 191   }
 192 
 193   assert(_ordered_regions->active_regions() == _num_regions, "Must match");
 194   assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
         "misaligned heap: " PTR_FORMAT, p2i(base()));
 196 
 197   LogTarget(Trace, gc, region) lt;
 198   if (lt.is_enabled()) {
 199     ResourceMark rm;
 200     LogStream ls(lt);
 201     log_trace(gc, region)("All Regions");
 202     _ordered_regions->print_on(&ls);
 203     log_trace(gc, region)("Free Regions");
 204     _free_regions->print_on(&ls);
 205   }
 206 
 207   _recycled_regions = NEW_C_HEAP_ARRAY(size_t, _num_regions, mtGC);
 208 
  // The call below uses facilities (the SATB queue machinery) that currently live in G1,
  // but probably belong in a shared location.
 211   JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
 212                                                SATB_Q_FL_lock,
 213                                                20 /*G1SATBProcessCompletedThreshold */,
 214                                                Shared_SATB_Q_lock);
 215 
  // Reserve space for the marking bitmap.
 217   _bitmap_size = MarkBitMap::compute_size(heap_rs.size());
 218   _heap_region = MemRegion((HeapWord*) heap_rs.base(), heap_rs.size() / HeapWordSize);
 219 
 220   size_t bitmap_bytes_per_region = _bitmap_size / _num_regions;
 221   _bitmap_words_per_region = bitmap_bytes_per_region / HeapWordSize;
 222 
 223   guarantee(is_power_of_2(bitmap_bytes_per_region),
 224             "Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region);
 225   guarantee((bitmap_bytes_per_region % os::vm_page_size()) == 0,
 226             "Bitmap bytes per region should be page-granular: bpr = " SIZE_FORMAT ", page size = %d",
 227             bitmap_bytes_per_region, os::vm_page_size());
  guarantee(is_power_of_2(_bitmap_words_per_region),
            "Bitmap words per region should be power of two: " SIZE_FORMAT, _bitmap_words_per_region);
  guarantee(bitmap_bytes_per_region >= (size_t)os::vm_page_size(),
            "Bitmap slice per region (" SIZE_FORMAT ") should be at least page size (%d)",
            bitmap_bytes_per_region, os::vm_page_size());
 233 
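  // Use large pages for the bitmap only when each per-region bitmap slice is at least one large
  // page, presumably so that the per-region bitmap commits below remain page-granular.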
 234   size_t bitmap_page_size = UseLargePages && (bitmap_bytes_per_region >= (size_t)os::large_page_size()) ?
 235                             (size_t)os::large_page_size() : (size_t)os::vm_page_size();
 236 
 237   ReservedSpace bitmap(_bitmap_size, bitmap_page_size);
 238   MemTracker::record_virtual_memory_type(bitmap.base(), mtGC);
 239   _bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize);
 240 
 241   {
 242     ShenandoahHeapLocker locker(lock());
 243     for (size_t i = 0; i < _num_regions; i++) {
 244       ShenandoahHeapRegion* r = _ordered_regions->get(i);
 245       if (r->is_committed()) {
 246         commit_bitmaps(r);
 247       }
 248     }
 249   }
 250 
 251   size_t page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
 252 
 253   if (ShenandoahVerify) {
 254     ReservedSpace verify_bitmap(_bitmap_size, page_size);
 255     os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), false,
 256                               "couldn't allocate verification bitmap");
 257     MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
 258     MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
 259     _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
 260     _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
 261   }
 262 
 263   if (ShenandoahAlwaysPreTouch) {
 264     assert (!AlwaysPreTouch, "Should have been overridden");
 265 
    // For NUMA, it is important to pre-touch the storage under the bitmaps with the worker threads,
    // before the initialize() call below zeroes it from the initializing thread. For any given region,
    // we touch the region and the corresponding bitmaps from the same thread.
 269 
 270     log_info(gc, heap)("Parallel pretouch " SIZE_FORMAT " regions with " SIZE_FORMAT " byte pages",
 271                        _ordered_regions->count(), page_size);
 272     ShenandoahPretouchTask cl(_ordered_regions, bitmap.base(), _bitmap_size, page_size);
 273     _workers->run_task(&cl);
 274   }
 275 
 276   _mark_bit_map.initialize(_heap_region, _bitmap_region);
 277 
 278   if (UseShenandoahMatrix) {
 279     _connection_matrix = new ShenandoahConnectionMatrix(_num_regions);
 280   } else {
 281     _connection_matrix = NULL;
 282   }
 283 
 284   _partial_gc = _shenandoah_policy->can_do_partial_gc() ?
 285                 new ShenandoahPartialGC(this, _num_regions) :
 286                 NULL;
 287 
 288   _monitoring_support = new ShenandoahMonitoringSupport(this);
 289 
 290   _phase_timings = new ShenandoahPhaseTimings();
 291 
 292   if (ShenandoahAllocationTrace) {
 293     _alloc_tracker = new ShenandoahAllocTracker();
 294   }
 295 
 296   ShenandoahStringDedup::initialize();
 297 
 298   _concurrent_gc_thread = new ShenandoahConcurrentThread();
 299 
 300   ShenandoahMarkCompact::initialize();
 301 
 302   ShenandoahCodeRoots::initialize();
 303 
 304   return JNI_OK;
 305 }
 306 
 307 ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
 308   CollectedHeap(),
 309   _shenandoah_policy(policy),
 310   _concurrent_mark_in_progress(0),
 311   _evacuation_in_progress(0),
 312   _full_gc_in_progress(false),
 313   _update_refs_in_progress(false),
 314   _free_regions(NULL),
 315   _collection_set(NULL),
 316   _bytes_allocated_since_cm(0),
 317   _bytes_allocated_during_cm(0),
 318   _allocated_last_gc(0),
 319   _used_start_gc(0),
 320   _max_workers(MAX2(ConcGCThreads, ParallelGCThreads)),
 321   _ref_processor(NULL),
 322   _top_at_mark_starts(NULL),
 323   _top_at_mark_starts_base(NULL),
 324   _mark_bit_map(),
 325   _connection_matrix(NULL),
 326   _cancelled_concgc(0),
 327   _need_update_refs(false),
 328   _need_reset_bitmap(false),
 329   _bitmap_valid(true),
 330   _verifier(NULL),
 331   _heap_lock(0),
 332   _used_at_last_gc(0),
 333   _alloc_seq_at_last_gc_start(0),
 334   _alloc_seq_at_last_gc_end(0),
 335   _safepoint_workers(NULL),
 336 #ifdef ASSERT
 337   _heap_lock_owner(NULL),
 338   _heap_expansion_count(0),
 339 #endif
 340   _gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
 341   _phase_timings(NULL),
 342   _alloc_tracker(NULL)
 343 {
  log_info(gc, init)("Parallel GC threads: " UINT32_FORMAT, ParallelGCThreads);
  log_info(gc, init)("Concurrent GC threads: " UINT32_FORMAT, ConcGCThreads);
 346   log_info(gc, init)("Parallel reference processing enabled: %s", BOOL_TO_STR(ParallelRefProcEnabled));
 347 
 348   _scm = new ShenandoahConcurrentMark();
 349   _used = 0;
 350 
 351   _max_workers = MAX2(_max_workers, 1U);
 352   _workers = new ShenandoahWorkGang("Shenandoah GC Threads", _max_workers,
 353                             /* are_GC_task_threads */true,
 354                             /* are_ConcurrentGC_threads */false);
 355   if (_workers == NULL) {
 356     vm_exit_during_initialization("Failed necessary allocation.");
 357   } else {
 358     _workers->initialize_workers();
 359   }
 360 
 361   if (ParallelSafepointCleanupThreads > 1) {
 362     _safepoint_workers = new ShenandoahWorkGang("Safepoint Cleanup Thread",
 363                                                 ParallelSafepointCleanupThreads,
 364                                                 false, false);
 365     _safepoint_workers->initialize_workers();
 366   }
 367 }
 368 
 369 class ShenandoahResetBitmapTask : public AbstractGangTask {
 370 private:
 371   ShenandoahHeapRegionSet* _regions;
 372 
 373 public:
 374   ShenandoahResetBitmapTask(ShenandoahHeapRegionSet* regions) :
 375     AbstractGangTask("Parallel Reset Bitmap Task"),
 376     _regions(regions) {
 377     _regions->clear_current_index();
 378   }
 379 
 380   void work(uint worker_id) {
 381     ShenandoahHeapRegion* region = _regions->claim_next();
 382     ShenandoahHeap* heap = ShenandoahHeap::heap();
 383     while (region != NULL) {
 384       if (region->is_committed()) {
 385         HeapWord* bottom = region->bottom();
 386         HeapWord* top = heap->top_at_mark_start(region->bottom());
 387         if (top > bottom) {
 388           heap->mark_bit_map()->clear_range_large(MemRegion(bottom, top));
 389         }
 390         assert(heap->is_bitmap_clear_range(bottom, region->end()), "must be clear");
 391         heap->set_top_at_mark_start(region->bottom(), region->bottom());
 392       }
 393       region = _regions->claim_next();
 394     }
 395   }
 396 };
 397 
 398 void ShenandoahHeap::reset_mark_bitmap(WorkGang* workers) {
 399   assert_gc_workers(workers->active_workers());
 400 
 401   ShenandoahResetBitmapTask task = ShenandoahResetBitmapTask(_ordered_regions);
 402   workers->run_task(&task);
 403 }
 404 
 405 bool ShenandoahHeap::is_bitmap_clear() {
 406   for (size_t idx = 0; idx < _num_regions; idx++) {
 407     ShenandoahHeapRegion* r = _ordered_regions->get(idx);
 408     if (r->is_committed() && !is_bitmap_clear_range(r->bottom(), r->end())) {
 409       return false;
 410     }
 411   }
 412   return true;
 413 }
 414 
 415 bool ShenandoahHeap::is_bitmap_clear_range(HeapWord* start, HeapWord* end) {
 416   return _mark_bit_map.getNextMarkedWordAddress(start, end) == end;
 417 }
 418 
 419 void ShenandoahHeap::print_on(outputStream* st) const {
 420   st->print_cr("Shenandoah Heap");
 421   st->print_cr(" " SIZE_FORMAT "K total, " SIZE_FORMAT "K committed, " SIZE_FORMAT "K used",
 422                capacity() / K, committed() / K, used() / K);
 423   st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT"K regions",
 424                num_regions(), ShenandoahHeapRegion::region_size_bytes() / K);
 425 
 426   st->print("Status: ");
 427   if (concurrent_mark_in_progress()) {
 428     st->print("marking ");
 429   } else if (is_evacuation_in_progress()) {
 430     st->print("evacuating ");
 431   } else if (is_update_refs_in_progress()) {
 432     st->print("updating refs ");
 433   } else {
 434     st->print("idle ");
 435   }
 436   if (cancelled_concgc()) {
 437     st->print("cancelled ");
 438   }
 439   st->cr();
 440 
 441   st->print_cr("Reserved region:");
 442   st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ",
 443                p2i(reserved_region().start()),
 444                p2i(reserved_region().end()));
 445 
 446   if (UseShenandoahMatrix) {
 447     st->print_cr("Matrix:");
 448 
 449     ShenandoahConnectionMatrix* matrix = connection_matrix();
 450     if (matrix != NULL) {
 451       st->print_cr(" - base: " PTR_FORMAT, p2i(matrix->matrix_addr()));
 452       st->print_cr(" - stride: " SIZE_FORMAT, matrix->stride());
 453       st->print_cr(" - magic: " PTR_FORMAT, matrix->magic_offset());
 454     } else {
 455       st->print_cr(" No matrix.");
 456     }
 457   }
 458 
 459   if (Verbose) {
 460     print_heap_regions_on(st);
 461   }
 462 }
 463 
 464 class ShenandoahInitGCLABClosure : public ThreadClosure {
 465 public:
 466   void do_thread(Thread* thread) {
 467     thread->gclab().initialize(true);
 468   }
 469 };
 470 
 471 void ShenandoahHeap::post_initialize() {
 472   if (UseTLAB) {
 473     MutexLocker ml(Threads_lock);
 474 
 475     ShenandoahInitGCLABClosure init_gclabs;
 476     Threads::java_threads_do(&init_gclabs);
 477     gc_threads_do(&init_gclabs);
 478 
    // The gclab cannot be initialized early during VM startup, as it cannot determine its max_size.
    // Instead, let the WorkGang initialize the gclab when a new worker is created.
 481     _workers->set_initialize_gclab();
 482   }
 483 
 484   _scm->initialize(_max_workers);
 485 
 486   ref_processing_init();
 487 
 488   _shenandoah_policy->post_heap_initialize();
 489 }
 490 
 491 size_t ShenandoahHeap::used() const {
 492   OrderAccess::acquire();
 493   return _used;
 494 }
 495 
 496 size_t ShenandoahHeap::committed() const {
 497   OrderAccess::acquire();
 498   return _committed;
 499 }
 500 
 501 void ShenandoahHeap::increase_committed(size_t bytes) {
 502   assert_heaplock_or_safepoint();
 503   _committed += bytes;
 504 }
 505 
 506 void ShenandoahHeap::decrease_committed(size_t bytes) {
 507   assert_heaplock_or_safepoint();
 508   _committed -= bytes;
 509 }
 510 
 511 void ShenandoahHeap::increase_used(size_t bytes) {
 512   assert_heaplock_or_safepoint();
 513   _used += bytes;
 514 }
 515 
 516 void ShenandoahHeap::set_used(size_t bytes) {
 517   assert_heaplock_or_safepoint();
 518   _used = bytes;
 519 }
 520 
 521 void ShenandoahHeap::decrease_used(size_t bytes) {
 522   assert_heaplock_or_safepoint();
 523   assert(_used >= bytes, "never decrease heap size by more than we've left");
 524   _used -= bytes;
 525 }
 526 
 527 size_t ShenandoahHeap::capacity() const {
 528   return num_regions() * ShenandoahHeapRegion::region_size_bytes();
 529 }
 530 
 531 bool ShenandoahHeap::is_maximal_no_gc() const {
 532   Unimplemented();
 533   return true;
 534 }
 535 
 536 size_t ShenandoahHeap::max_capacity() const {
 537   return _num_regions * ShenandoahHeapRegion::region_size_bytes();
 538 }
 539 
 540 size_t ShenandoahHeap::initial_capacity() const {
 541   return _initial_size;
 542 }
 543 
 544 bool ShenandoahHeap::is_in(const void* p) const {
 545   HeapWord* heap_base = (HeapWord*) base();
 546   HeapWord* last_region_end = heap_base + ShenandoahHeapRegion::region_size_words() * num_regions();
 547   return p >= heap_base && p < last_region_end;
 548 }
 549 
 550 bool ShenandoahHeap::is_scavengable(const void* p) {
 551   return true;
 552 }
 553 
 554 void ShenandoahHeap::handle_heap_shrinkage() {
 555   ShenandoahHeapLocker locker(lock());
 556 
 557   ShenandoahHeapRegionSet* set = regions();
 558 
 559   size_t count = 0;
 560   double current = os::elapsedTime();
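  // os::elapsedTime() is in seconds while ShenandoahUncommitDelay is in milliseconds,
  // hence the multiplication by 1000 below.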
 561   for (size_t i = 0; i < num_regions(); i++) {
 562     ShenandoahHeapRegion* r = set->get(i);
 563     if (r->is_empty_committed() &&
 564             (current - r->empty_time()) * 1000 > ShenandoahUncommitDelay &&
 565             r->make_empty_uncommitted()) {
 566       count++;
 567     }
 568   }
 569 
 570   if (count > 0) {
 571     log_info(gc)("Uncommitted " SIZE_FORMAT "M. Heap: " SIZE_FORMAT "M reserved, " SIZE_FORMAT "M committed, " SIZE_FORMAT "M used",
 572                  count * ShenandoahHeapRegion::region_size_bytes() / M, capacity() / M, committed() / M, used() / M);
 573   }
 574 }
 575 
 576 HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
 577   // Retain tlab and allocate object in shared space if
 578   // the amount free in the tlab is too large to discard.
 579   if (thread->gclab().free() > thread->gclab().refill_waste_limit()) {
 580     thread->gclab().record_slow_allocation(size);
 581     return NULL;
 582   }
 583 
 584   // Discard gclab and allocate a new one.
 585   // To minimize fragmentation, the last GCLAB may be smaller than the rest.
 586   size_t new_gclab_size = thread->gclab().compute_size(size);
 587 
 588   thread->gclab().clear_before_allocation();
 589 
 590   if (new_gclab_size == 0) {
 591     return NULL;
 592   }
 593 
 594   // Allocate a new GCLAB...
 595   HeapWord* obj = allocate_new_gclab(new_gclab_size);
 596   if (obj == NULL) {
 597     return NULL;
 598   }
 599 
 600   if (ZeroTLAB) {
 601     // ..and clear it.
 602     Copy::zero_to_words(obj, new_gclab_size);
 603   } else {
 604     // ...and zap just allocated object.
 605 #ifdef ASSERT
 606     // Skip mangling the space corresponding to the object header to
 607     // ensure that the returned space is not considered parsable by
 608     // any concurrent GC thread.
 609     size_t hdr_size = oopDesc::header_size();
 610     Copy::fill_to_words(obj + hdr_size, new_gclab_size - hdr_size, badHeapWordVal);
 611 #endif // ASSERT
 612   }
 613   thread->gclab().fill(obj, obj + size, new_gclab_size);
 614   return obj;
 615 }
 616 
 617 HeapWord* ShenandoahHeap::allocate_new_tlab(size_t word_size) {
 618 #ifdef ASSERT
 619   log_debug(gc, alloc)("Allocate new tlab, requested size = " SIZE_FORMAT " bytes", word_size * HeapWordSize);
 620 #endif
 621   return allocate_new_lab(word_size, _alloc_tlab);
 622 }
 623 
 624 HeapWord* ShenandoahHeap::allocate_new_gclab(size_t word_size) {
 625 #ifdef ASSERT
 626   log_debug(gc, alloc)("Allocate new gclab, requested size = " SIZE_FORMAT " bytes", word_size * HeapWordSize);
 627 #endif
 628   return allocate_new_lab(word_size, _alloc_gclab);
 629 }
 630 
 631 HeapWord* ShenandoahHeap::allocate_new_lab(size_t word_size, AllocType type) {
 632   HeapWord* result = allocate_memory(word_size, type);
 633 
 634   if (result != NULL) {
 635     assert(! in_collection_set(result), "Never allocate in collection set");
 636     _bytes_allocated_since_cm += word_size * HeapWordSize;
 637 
    log_develop_trace(gc, tlab)("allocating new tlab of size " SIZE_FORMAT " at addr " PTR_FORMAT, word_size, p2i(result));
 639 
 640   }
 641   return result;
 642 }
 643 
 644 ShenandoahHeap* ShenandoahHeap::heap() {
 645   CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to ShenandoahHeap::heap()");
 647   assert(heap->kind() == CollectedHeap::ShenandoahHeap, "not a shenandoah heap");
 648   return (ShenandoahHeap*) heap;
 649 }
 650 
 651 ShenandoahHeap* ShenandoahHeap::heap_no_check() {
 652   CollectedHeap* heap = Universe::heap();
 653   return (ShenandoahHeap*) heap;
 654 }
 655 
 656 HeapWord* ShenandoahHeap::allocate_memory(size_t word_size, AllocType type) {
 657   ShenandoahAllocTrace trace_alloc(word_size, type);
 658 
 659   bool in_new_region = false;
 660   HeapWord* result = allocate_memory_under_lock(word_size, type, in_new_region);
 661 
 662   if (type == _alloc_tlab || type == _alloc_shared) {
 663     // Allocation failed, try full-GC, then retry allocation.
 664     //
 665     // It might happen that one of the threads requesting allocation would unblock
 666     // way later after full-GC happened, only to fail the second allocation, because
 667     // other threads have already depleted the free storage. In this case, a better
 668     // strategy would be to try full-GC again.
 669     //
    // Lacking a way to detect progress from the "collect" call, we are left with blindly
    // retrying for some bounded number of times.
 672     // TODO: Poll if Full GC made enough progress to warrant retry.
 673     int tries = 0;
 674     while ((result == NULL) && (tries++ < ShenandoahFullGCTries)) {
      log_debug(gc)("[" PTR_FORMAT "] Failed to allocate " SIZE_FORMAT " bytes, doing full GC, try %d",
                    p2i(Thread::current()), word_size * HeapWordSize, tries);
 677       collect(GCCause::_allocation_failure);
 678       result = allocate_memory_under_lock(word_size, type, in_new_region);
 679     }
 680   }
 681 
 682   if (in_new_region) {
 683     // Update monitoring counters when we took a new region. This amortizes the
 684     // update costs on slow path.
 685     concurrent_thread()->trigger_counters_update();
 686   }
 687 
  log_develop_trace(gc, alloc)("allocate memory chunk of size " SIZE_FORMAT " at addr " PTR_FORMAT " by thread %d",
                               word_size, p2i(result), Thread::current()->osthread()->thread_id());
 690 
 691   return result;
 692 }
 693 
 694 HeapWord* ShenandoahHeap::allocate_memory_under_lock(size_t word_size, AllocType type, bool& in_new_region) {
 695   ShenandoahHeapLocker locker(lock());
 696   return _free_regions->allocate(word_size, type, in_new_region);
 697 }
 698 
 699 HeapWord*  ShenandoahHeap::mem_allocate(size_t size,
 700                                         bool*  gc_overhead_limit_was_exceeded) {
  HeapWord* filler = allocate_memory(size + BrooksPointer::word_size(), _alloc_shared);
  if (filler != NULL) {
    // Compute the object address only after the NULL check, to avoid pointer arithmetic on NULL.
    HeapWord* result = filler + BrooksPointer::word_size();
    BrooksPointer::initialize(oop(result));
    _bytes_allocated_since_cm += size * HeapWordSize;

    assert(! in_collection_set(result), "never allocate in targeted region");
    return result;
 709   } else {
 710     return NULL;
 711   }
 712 }
 713 
 714 class ShenandoahEvacuateUpdateRootsClosure: public ExtendedOopClosure {
 715 private:
 716   ShenandoahHeap* _heap;
 717   Thread* _thread;
 718 public:
 719   ShenandoahEvacuateUpdateRootsClosure() :
 720           _heap(ShenandoahHeap::heap()), _thread(Thread::current()) {
 721   }
 722 
 723 private:
 724   template <class T>
 725   void do_oop_work(T* p) {
 726     assert(_heap->is_evacuation_in_progress(), "Only do this when evacuation is in progress");
 727 
 728     T o = oopDesc::load_heap_oop(p);
 729     if (! oopDesc::is_null(o)) {
 730       oop obj = oopDesc::decode_heap_oop_not_null(o);
 731       if (_heap->in_collection_set(obj)) {
 732         assert(_heap->is_marked(obj), "only evacuate marked objects %d %d",
 733                _heap->is_marked(obj), _heap->is_marked(ShenandoahBarrierSet::resolve_oop_static_not_null(obj)));
 734         oop resolved = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
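        // The forwardee equals the object itself only if the object has not been copied yet;
        // in that case, evacuate it now and use the new to-space copy.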
 735         if (oopDesc::unsafe_equals(resolved, obj)) {
 736           bool evac;
 737           resolved = _heap->evacuate_object(obj, _thread, evac);
 738         }
 739         oopDesc::encode_store_heap_oop(p, resolved);
 740       }
 741     }
 742   }
 743 
 744 public:
 745   void do_oop(oop* p) {
 746     do_oop_work(p);
 747   }
 748   void do_oop(narrowOop* p) {
 749     do_oop_work(p);
 750   }
 751 };
 752 
 753 class ShenandoahEvacuateRootsClosure: public ExtendedOopClosure {
 754 private:
 755   ShenandoahHeap* _heap;
 756   Thread* _thread;
 757 public:
 758   ShenandoahEvacuateRootsClosure() :
 759           _heap(ShenandoahHeap::heap()), _thread(Thread::current()) {
 760   }
 761 
 762 private:
 763   template <class T>
 764   void do_oop_work(T* p) {
 765     T o = oopDesc::load_heap_oop(p);
 766     if (! oopDesc::is_null(o)) {
 767       oop obj = oopDesc::decode_heap_oop_not_null(o);
 768       if (_heap->in_collection_set(obj)) {
 769         oop resolved = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
 770         if (oopDesc::unsafe_equals(resolved, obj)) {
 771           bool evac;
 772           _heap->evacuate_object(obj, _thread, evac);
 773         }
 774       }
 775     }
 776   }
 777 
 778 public:
 779   void do_oop(oop* p) {
 780     do_oop_work(p);
 781   }
 782   void do_oop(narrowOop* p) {
 783     do_oop_work(p);
 784   }
 785 };
 786 
 787 class ShenandoahParallelEvacuateRegionObjectClosure : public ObjectClosure {
 788 private:
 789   ShenandoahHeap* const _heap;
 790   Thread* const _thread;
 791 public:
 792   ShenandoahParallelEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
 793     _heap(heap), _thread(Thread::current()) {}
 794 
 795   void do_object(oop p) {
 796     assert(_heap->is_marked(p), "expect only marked objects");
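    // Only evacuate objects that are not forwarded yet. evacuate_object() installs the forwarding
    // pointer atomically, so racing workers end up agreeing on a single to-space copy.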
 797     if (oopDesc::unsafe_equals(p, ShenandoahBarrierSet::resolve_oop_static_not_null(p))) {
 798       bool evac;
 799       _heap->evacuate_object(p, _thread, evac);
 800     }
 801   }
 802 };
 803 
 804 class ShenandoahParallelEvacuationTask : public AbstractGangTask {
 805 private:
 806   ShenandoahHeap* const _sh;
 807   ShenandoahCollectionSet* const _cs;
 808   volatile jbyte _claimed_codecache;
 809 
 810   bool claim_codecache() {
 811     jbyte old = Atomic::cmpxchg((jbyte)1, &_claimed_codecache, (jbyte)0);
 812     return old == 0;
 813   }
 814 public:
 815   ShenandoahParallelEvacuationTask(ShenandoahHeap* sh,
 816                          ShenandoahCollectionSet* cs) :
 817     AbstractGangTask("Parallel Evacuation Task"),
    _sh(sh),
    _cs(cs),
 820     _claimed_codecache(0)
 821   {}
 822 
 823   void work(uint worker_id) {
 824 
 825     SuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
 826 
 827     // If concurrent code cache evac is enabled, evacuate it here.
    // Note we cannot update the roots here, because we risk non-atomic stores to the alive
    // nmethods. The update is handled elsewhere.
 830     if (ShenandoahConcurrentEvacCodeRoots && claim_codecache()) {
 831       ShenandoahEvacuateRootsClosure cl;
 832       MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 833       CodeBlobToOopClosure blobs(&cl, !CodeBlobToOopClosure::FixRelocations);
 834       CodeCache::blobs_do(&blobs);
 835     }
 836 
 837     ShenandoahParallelEvacuateRegionObjectClosure cl(_sh);
 838     ShenandoahHeapRegion* r;
 839     while ((r =_cs->claim_next()) != NULL) {
      log_develop_trace(gc, region)("Thread " UINT32_FORMAT " claimed Heap Region " SIZE_FORMAT,
                                    worker_id,
                                    r->region_number());
 843 
 844       assert(r->has_live(), "all-garbage regions are reclaimed early");
 845       _sh->marked_object_iterate(r, &cl);
 846 
 847       if (_sh->check_cancelled_concgc_and_yield()) {
 848         log_develop_trace(gc, region)("Cancelled concgc while evacuating region " SIZE_FORMAT, r->region_number());
 849         break;
 850       }
 851     }
 852   }
 853 };
 854 
 855 void ShenandoahHeap::trash_cset_regions() {
 856   ShenandoahHeapLocker locker(lock());
 857 
 858   ShenandoahCollectionSet* set = collection_set();
 859   ShenandoahHeapRegion* r;
 860   set->clear_current_index();
 861   while ((r = set->next()) != NULL) {
 862     r->make_trash();
 863   }
 864   collection_set()->clear();
 865 }
 866 
 867 void ShenandoahHeap::print_heap_regions_on(outputStream* st) const {
 868   st->print_cr("Heap Regions:");
 869   st->print_cr("EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HC=humongous continuation, CS=collection set, T=trash, P=pinned");
 870   st->print_cr("BTE=bottom/top/end, U=used, T=TLAB allocs, G=GCLAB allocs, S=shared allocs, L=live data");
 871   st->print_cr("R=root, CP=critical pins, TAMS=top-at-mark-start (previous, next)");
 872   st->print_cr("FTS=first use timestamp, LTS=last use timestamp");
 873 
 874   _ordered_regions->print_on(st);
 875 }
 876 
 877 size_t ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) {
 878   assert(start->is_humongous_start(), "reclaim regions starting with the first one");
 879 
 880   oop humongous_obj = oop(start->bottom() + BrooksPointer::word_size());
 881   size_t size = humongous_obj->size() + BrooksPointer::word_size();
 882   size_t required_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
 883   size_t index = start->region_number() + required_regions - 1;
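  // required_regions() rounds the object footprint (including its Brooks pointer) up to whole regions;
  // 'index' starts at the last region of the humongous sequence, so the loop below reclaims from the tail.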
 884 
 885   assert(!start->has_live(), "liveness must be zero");
  log_trace(gc, humongous)("Reclaiming " SIZE_FORMAT " humongous regions for object of size: " SIZE_FORMAT " words", required_regions, size);
 887 
  for (size_t i = 0; i < required_regions; i++) {
    // Reclaim from the tail. Otherwise, the assertion fails when printing a region to the trace log,
    // because it expects every humongous continuation region to follow its humongous start region.
    ShenandoahHeapRegion* region = _ordered_regions->get(index--);
 892 
 893     LogTarget(Trace, gc, humongous) lt;
 894     if (lt.is_enabled()) {
 895       ResourceMark rm;
 896       LogStream ls(lt);
 897       region->print_on(&ls);
 898     }
 899 
 900     assert(region->is_humongous(), "expect correct humongous start or continuation");
 901     assert(!in_collection_set(region), "Humongous region should not be in collection set");
 902 
 903     region->make_trash();
 904   }
 905   return required_regions;
 906 }
 907 
 908 #ifdef ASSERT
 909 class ShenandoahCheckCollectionSetClosure: public ShenandoahHeapRegionClosure {
 910   bool heap_region_do(ShenandoahHeapRegion* r) {
 911     assert(! ShenandoahHeap::heap()->in_collection_set(r), "Should have been cleared by now");
 912     return false;
 913   }
 914 };
 915 #endif
 916 
 917 void ShenandoahHeap::prepare_for_concurrent_evacuation() {
 918   assert(_ordered_regions->get(0)->region_number() == 0, "FIXME CHF. FIXME CHF!");
 919 
 920   log_develop_trace(gc)("Thread %d started prepare_for_concurrent_evacuation", Thread::current()->osthread()->thread_id());
 921 
 922   if (!cancelled_concgc()) {
 923     // Allocations might have happened before we STWed here, record peak:
 924     shenandoahPolicy()->record_peak_occupancy();
 925 
 926     ensure_parsability(true);
 927 
 928     if (ShenandoahVerify) {
 929       verifier()->verify_after_concmark();
 930     }
 931 
 932     trash_cset_regions();
 933 
 934     // NOTE: This needs to be done during a stop the world pause, because
 935     // putting regions into the collection set concurrently with Java threads
 936     // will create a race. In particular, acmp could fail because when we
 937     // resolve the first operand, the containing region might not yet be in
 938     // the collection set, and thus return the original oop. When the 2nd
 939     // operand gets resolved, the region could be in the collection set
 940     // and the oop gets evacuated. If both operands have originally been
 941     // the same, we get false negatives.
 942 
 943     {
 944       ShenandoahHeapLocker locker(lock());
 945       _collection_set->clear();
 946       _free_regions->clear();
 947 
 948 #ifdef ASSERT
 949       ShenandoahCheckCollectionSetClosure ccsc;
 950       _ordered_regions->heap_region_iterate(&ccsc);
 951 #endif
 952 
 953       _shenandoah_policy->choose_collection_set(_collection_set);
 954 
 955       _shenandoah_policy->choose_free_set(_free_regions);
 956     }
 957 
 958     _bytes_allocated_since_cm = 0;
 959 
 960     Universe::update_heap_info_at_gc();
 961 
 962     if (ShenandoahVerify) {
 963       verifier()->verify_before_evacuation();
 964     }
 965   }
 966 }
 967 
 968 
 969 class ShenandoahRetireTLABClosure : public ThreadClosure {
 970 private:
 971   bool _retire;
 972 
 973 public:
 974   ShenandoahRetireTLABClosure(bool retire) : _retire(retire) {}
 975 
 976   void do_thread(Thread* thread) {
 977     assert(thread->gclab().is_initialized(), "GCLAB should be initialized for %s", thread->name());
 978     thread->gclab().make_parsable(_retire);
 979   }
 980 };
 981 
 982 void ShenandoahHeap::ensure_parsability(bool retire_tlabs) {
 983   if (UseTLAB) {
 984     CollectedHeap::ensure_parsability(retire_tlabs);
 985     ShenandoahRetireTLABClosure cl(retire_tlabs);
 986     Threads::java_threads_do(&cl);
 987     gc_threads_do(&cl);
 988   }
 989 }
 990 
 991 
 992 class ShenandoahEvacuateUpdateRootsTask : public AbstractGangTask {
 993   ShenandoahRootEvacuator* _rp;
 994 public:
 995 
 996   ShenandoahEvacuateUpdateRootsTask(ShenandoahRootEvacuator* rp) :
 997     AbstractGangTask("Shenandoah evacuate and update roots"),
 998     _rp(rp)
 999   {
1000     // Nothing else to do.
1001   }
1002 
1003   void work(uint worker_id) {
1004     ShenandoahEvacuateUpdateRootsClosure cl;
1005 
1006     if (ShenandoahConcurrentEvacCodeRoots) {
1007       _rp->process_evacuate_roots(&cl, NULL, worker_id);
1008     } else {
1009       MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);
1010       _rp->process_evacuate_roots(&cl, &blobsCl, worker_id);
1011     }
1012   }
1013 };
1014 
1015 class ShenandoahFixRootsTask : public AbstractGangTask {
1016   ShenandoahRootEvacuator* _rp;
1017 public:
1018 
1019   ShenandoahFixRootsTask(ShenandoahRootEvacuator* rp) :
1020     AbstractGangTask("Shenandoah update roots"),
1021     _rp(rp)
1022   {
1023     // Nothing else to do.
1024   }
1025 
1026   void work(uint worker_id) {
1027     ShenandoahUpdateRefsClosure cl;
1028     MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);
1029 
1030     _rp->process_evacuate_roots(&cl, &blobsCl, worker_id);
1031   }
1032 };
1033 
1034 void ShenandoahHeap::evacuate_and_update_roots() {
1035 
1036 #if defined(COMPILER2) || INCLUDE_JVMCI
1037   DerivedPointerTable::clear();
1038 #endif
1039   assert(SafepointSynchronize::is_at_safepoint(), "Only iterate roots while world is stopped");
1040 
1041   {
1042     ShenandoahRootEvacuator rp(this, workers()->active_workers(), ShenandoahPhaseTimings::init_evac);
1043     ShenandoahEvacuateUpdateRootsTask roots_task(&rp);
1044     workers()->run_task(&roots_task);
1045   }
1046 
1047 #if defined(COMPILER2) || INCLUDE_JVMCI
1048   DerivedPointerTable::update_pointers();
1049 #endif
1050   if (cancelled_concgc()) {
1051     fixup_roots();
1052   }
1053 }
1054 
1055 
1056 void ShenandoahHeap::fixup_roots() {
1057     assert(cancelled_concgc(), "Only after concurrent cycle failed");
1058 
    // If initial evacuation has been cancelled, we need to update all references
    // after all workers have finished. Otherwise we might run into the following problem:
    // GC thread 1 cannot allocate anymore, so evacuation fails and leaves a from-space pointer to object X in a root;
    // GC thread 2 then evacuates the same object X to to-space,
    // which leaves a truly dangling from-space reference in the first root oop*. This must not happen.
    // clear() and update_pointers() must always be called in pairs; they
    // cannot nest with the clear()/update_pointers() calls above.
1066 #if defined(COMPILER2) || INCLUDE_JVMCI
1067     DerivedPointerTable::clear();
1068 #endif
1069     ShenandoahRootEvacuator rp(this, workers()->active_workers(), ShenandoahPhaseTimings::init_evac);
1070     ShenandoahFixRootsTask update_roots_task(&rp);
1071     workers()->run_task(&update_roots_task);
1072 #if defined(COMPILER2) || INCLUDE_JVMCI
1073     DerivedPointerTable::update_pointers();
1074 #endif
1075 }
1076 
1077 void ShenandoahHeap::do_evacuation() {
1078   ShenandoahGCPhase conc_evac_phase(ShenandoahPhaseTimings::conc_evac);
1079 
1080   LogTarget(Trace, gc, region) lt_region;
1081   LogTarget(Trace, gc, cset) lt_cset;
1082 
1083   if (lt_region.is_enabled()) {
1084     ResourceMark rm;
1085     LogStream ls(lt_region);
1086     ls.print_cr("All available regions:");
1087     print_heap_regions_on(&ls);
1088   }
1089 
1090   if (lt_cset.is_enabled()) {
1091     ResourceMark rm;
1092     LogStream ls(lt_cset);
    ls.print_cr("Collection set (" SIZE_FORMAT " regions):", _collection_set->count());
1094     _collection_set->print_on(&ls);
1095 
1096     ls.print_cr("Free set:");
1097     _free_regions->print_on(&ls);
1098   }
1099 
1100   ShenandoahParallelEvacuationTask task(this, _collection_set);
1101   workers()->run_task(&task);
1102 
1103   if (lt_cset.is_enabled()) {
1104     ResourceMark rm;
1105     LogStream ls(lt_cset);
    ls.print_cr("After evacuation collection set (" SIZE_FORMAT " regions):",
                _collection_set->count());
1108     _collection_set->print_on(&ls);
1109 
1110     ls.print_cr("After evacuation free set:");
1111     _free_regions->print_on(&ls);
1112   }
1113 
1114   if (lt_region.is_enabled()) {
1115     ResourceMark rm;
1116     LogStream ls(lt_region);
1117     ls.print_cr("All regions after evacuation:");
1118     print_heap_regions_on(&ls);
1119   }
1120 }
1121 
1122 void ShenandoahHeap::roots_iterate(OopClosure* cl) {
1123   assert(SafepointSynchronize::is_at_safepoint(), "Only iterate roots while world is stopped");
1124 
1125   CodeBlobToOopClosure blobsCl(cl, false);
1126   CLDToOopClosure cldCl(cl);
1127 
1128   ShenandoahRootProcessor rp(this, 1, ShenandoahPhaseTimings::_num_phases);
1129   rp.process_all_roots(cl, NULL, &cldCl, &blobsCl, 0);
1130 }
1131 
1132 bool ShenandoahHeap::supports_tlab_allocation() const {
1133   return true;
1134 }
1135 
1136 size_t  ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
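  // Cap the TLAB size by the largest chunk currently visible in the free set (peeked without
  // taking the heap lock, hence "unsafe") and by the region-imposed maximum TLAB size.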
1137   return MIN2(_free_regions->unsafe_peek_free(), max_tlab_size());
1138 }
1139 
1140 size_t ShenandoahHeap::max_tlab_size() const {
1141   return ShenandoahHeapRegion::max_tlab_size_bytes();
1142 }
1143 
1144 class ShenandoahResizeGCLABClosure : public ThreadClosure {
1145 public:
1146   void do_thread(Thread* thread) {
1147     assert(thread->gclab().is_initialized(), "GCLAB should be initialized for %s", thread->name());
1148     thread->gclab().resize();
1149   }
1150 };
1151 
1152 void ShenandoahHeap::resize_all_tlabs() {
1153   CollectedHeap::resize_all_tlabs();
1154 
1155   ShenandoahResizeGCLABClosure cl;
1156   Threads::java_threads_do(&cl);
1157   gc_threads_do(&cl);
1158 }
1159 
1160 class ShenandoahAccumulateStatisticsGCLABClosure : public ThreadClosure {
1161 public:
1162   void do_thread(Thread* thread) {
1163     assert(thread->gclab().is_initialized(), "GCLAB should be initialized for %s", thread->name());
1164     thread->gclab().accumulate_statistics();
1165     thread->gclab().initialize_statistics();
1166   }
1167 };
1168 
1169 void ShenandoahHeap::accumulate_statistics_all_gclabs() {
1170   ShenandoahAccumulateStatisticsGCLABClosure cl;
1171   Threads::java_threads_do(&cl);
1172   gc_threads_do(&cl);
1173 }
1174 
1175 bool  ShenandoahHeap::can_elide_tlab_store_barriers() const {
1176   return true;
1177 }
1178 
1179 oop ShenandoahHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) {
1180   // Overridden to do nothing.
1181   return new_obj;
1182 }
1183 
1184 bool  ShenandoahHeap::can_elide_initializing_store_barrier(oop new_obj) {
1185   return true;
1186 }
1187 
1188 bool ShenandoahHeap::card_mark_must_follow_store() const {
1189   return false;
1190 }
1191 
1192 void ShenandoahHeap::collect(GCCause::Cause cause) {
1193   assert(cause != GCCause::_gc_locker, "no JNI critical callback");
1194   if (GCCause::is_user_requested_gc(cause)) {
1195     if (!DisableExplicitGC) {
1196       if (ExplicitGCInvokesConcurrent) {
1197         _concurrent_gc_thread->do_conc_gc();
1198       } else {
1199         _concurrent_gc_thread->do_full_gc(cause);
1200       }
1201     }
1202   } else if (cause == GCCause::_allocation_failure) {
1203     collector_policy()->set_should_clear_all_soft_refs(true);
1204     _concurrent_gc_thread->do_full_gc(cause);
1205   }
1206 }
1207 
1208 void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {
1209   //assert(false, "Shouldn't need to do full collections");
1210 }
1211 
1212 AdaptiveSizePolicy* ShenandoahHeap::size_policy() {
1213   Unimplemented();
1214   return NULL;
1215 
1216 }
1217 
1218 CollectorPolicy* ShenandoahHeap::collector_policy() const {
1219   return _shenandoah_policy;
1220 }
1221 
1222 
1223 HeapWord* ShenandoahHeap::block_start(const void* addr) const {
1224   Space* sp = heap_region_containing(addr);
1225   if (sp != NULL) {
1226     return sp->block_start(addr);
1227   }
1228   return NULL;
1229 }
1230 
1231 size_t ShenandoahHeap::block_size(const HeapWord* addr) const {
1232   Space* sp = heap_region_containing(addr);
1233   assert(sp != NULL, "block_size of address outside of heap");
1234   return sp->block_size(addr);
1235 }
1236 
1237 bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
1238   Space* sp = heap_region_containing(addr);
1239   return sp->block_is_obj(addr);
1240 }
1241 
1242 jlong ShenandoahHeap::millis_since_last_gc() {
1243   return 0;
1244 }
1245 
1246 void ShenandoahHeap::prepare_for_verify() {
1247   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
1248     ensure_parsability(false);
1249   }
1250 }
1251 
1252 void ShenandoahHeap::print_gc_threads_on(outputStream* st) const {
1253   workers()->print_worker_threads_on(st);
1254 }
1255 
1256 void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
1257   workers()->threads_do(tcl);
1258 }
1259 
1260 void ShenandoahHeap::print_tracing_info() const {
1261   LogTarget(Info, gc, stats) lt;
1262   if (lt.is_enabled()) {
1263     ResourceMark rm;
1264     LogStream ls(lt);
1265 
1266     phase_timings()->print_on(&ls);
1267 
1268     ls.cr();
1269     ls.cr();
1270 
1271     shenandoahPolicy()->print_gc_stats(&ls);
1272 
1273     ls.cr();
1274     ls.cr();
1275 
1276     if (ShenandoahAllocationTrace) {
1277       assert(alloc_tracker() != NULL, "Must be");
1278       alloc_tracker()->print_on(&ls);
1279     } else {
1280       ls.print_cr("  Allocation tracing is disabled, use -XX:+ShenandoahAllocationTrace to enable.");
1281     }
1282   }
1283 }
1284 
1285 void ShenandoahHeap::verify(VerifyOption vo) {
1286   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
1287     if (ShenandoahVerify) {
1288       verifier()->verify_generic(vo);
1289     } else {
1290       // TODO: Consider allocating verification bitmaps on demand,
1291       // and turn this on unconditionally.
1292     }
1293   }
1294 }
1295 size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
1296   return _free_regions->capacity();
1297 }
1298 
1299 class ShenandoahIterateObjectClosureRegionClosure: public ShenandoahHeapRegionClosure {
1300   ObjectClosure* _cl;
1301 public:
1302   ShenandoahIterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {}
1303   bool heap_region_do(ShenandoahHeapRegion* r) {
1304     ShenandoahHeap::heap()->marked_object_iterate(r, _cl);
1305     return false;
1306   }
1307 };
1308 
1309 void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
1310   ShenandoahIterateObjectClosureRegionClosure blk(cl);
1311   heap_region_iterate(&blk, false, true);
1312 }
1313 
1314 class ShenandoahSafeObjectIterateAdjustPtrsClosure : public MetadataAwareOopClosure {
1315 private:
1316   ShenandoahHeap* _heap;
1317 
1318 public:
1319   ShenandoahSafeObjectIterateAdjustPtrsClosure() : _heap(ShenandoahHeap::heap()) {}
1320 
1321 private:
1322   template <class T>
1323   inline void do_oop_work(T* p) {
1324     T o = oopDesc::load_heap_oop(p);
1325     if (!oopDesc::is_null(o)) {
1326       oop obj = oopDesc::decode_heap_oop_not_null(o);
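      // Rewrite the reference to point at the forwardee, so callers only ever see to-space copies.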
1327       oopDesc::encode_store_heap_oop(p, BrooksPointer::forwardee(obj));
1328     }
1329   }
1330 public:
1331   void do_oop(oop* p) {
1332     do_oop_work(p);
1333   }
1334   void do_oop(narrowOop* p) {
1335     do_oop_work(p);
1336   }
1337 };
1338 
1339 class ShenandoahSafeObjectIterateAndUpdate : public ObjectClosure {
1340 private:
1341   ObjectClosure* _cl;
1342 public:
1343   ShenandoahSafeObjectIterateAndUpdate(ObjectClosure *cl) : _cl(cl) {}
1344 
1345   virtual void do_object(oop obj) {
1346     assert (oopDesc::unsafe_equals(obj, BrooksPointer::forwardee(obj)),
1347             "avoid double-counting: only non-forwarded objects here");
1348 
1349     // Fix up the ptrs.
1350     ShenandoahSafeObjectIterateAdjustPtrsClosure adjust_ptrs;
1351     obj->oop_iterate(&adjust_ptrs);
1352 
    // Now the object can be passed on to the wrapped closure:
1354     _cl->do_object(obj);
1355   }
1356 };
1357 
1358 void ShenandoahHeap::safe_object_iterate(ObjectClosure* cl) {
1359   assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1360 
1361   // Safe iteration does objects only with correct references.
1362   // This is why we skip collection set regions that have stale copies of objects,
1363   // and fix up the pointers in the returned objects.
1364 
1365   ShenandoahSafeObjectIterateAndUpdate safe_cl(cl);
1366   ShenandoahIterateObjectClosureRegionClosure blk(&safe_cl);
1367   heap_region_iterate(&blk,
1368                       /* skip_cset_regions = */ true,
1369                       /* skip_humongous_continuations = */ true);
1370 
1371   _need_update_refs = false; // already updated the references
1372 }
1373 
// Apply blk->heap_region_do() to all regions in address order, optionally skipping collection set
// regions and humongous continuations, and terminating the iteration early if heap_region_do() returns true.
1376 void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk, bool skip_cset_regions, bool skip_humongous_continuation) const {
1377   for (size_t i = 0; i < num_regions(); i++) {
1378     ShenandoahHeapRegion* current  = _ordered_regions->get(i);
1379     if (skip_humongous_continuation && current->is_humongous_continuation()) {
1380       continue;
1381     }
1382     if (skip_cset_regions && in_collection_set(current)) {
1383       continue;
1384     }
1385     if (blk->heap_region_do(current)) {
1386       return;
1387     }
1388   }
1389 }
1390 
1391 class ShenandoahClearLivenessClosure : public ShenandoahHeapRegionClosure {
1392 private:
1393   ShenandoahHeap* sh;
1394 public:
1395   ShenandoahClearLivenessClosure(ShenandoahHeap* heap) : sh(heap) {}
1396 
1397   bool heap_region_do(ShenandoahHeapRegion* r) {
1398     r->clear_live_data();
1399     sh->set_top_at_mark_start(r->bottom(), r->top());
1400     return false;
1401   }
1402 };
1403 
1404 void ShenandoahHeap::start_concurrent_marking() {
1405   if (ShenandoahVerify) {
1406     verifier()->verify_before_concmark();
1407   }
1408 
1409   {
1410     ShenandoahGCPhase phase(ShenandoahPhaseTimings::accumulate_stats);
1411     accumulate_statistics_all_tlabs();
1412   }
1413 
1414   set_concurrent_mark_in_progress(true);
1415   // We need to reset all TLABs because we'd lose marks on all objects allocated in them.
1416   if (UseTLAB) {
1417     ShenandoahGCPhase phase(ShenandoahPhaseTimings::make_parsable);
1418     ensure_parsability(true);
1419   }
1420 
1421   _shenandoah_policy->record_bytes_allocated(_bytes_allocated_since_cm);
1422   _used_start_gc = used();
1423 
1424   {
1425     ShenandoahGCPhase phase(ShenandoahPhaseTimings::clear_liveness);
1426     ShenandoahClearLivenessClosure clc(this);
1427     heap_region_iterate(&clc);
1428   }
1429 
1430   // Make above changes visible to worker threads
1431   OrderAccess::fence();
1432 
1433   concurrentMark()->init_mark_roots();
1434 
1435   if (UseTLAB) {
1436     ShenandoahGCPhase phase(ShenandoahPhaseTimings::resize_tlabs);
1437     resize_all_tlabs();
1438   }
1439 }
1440 
1441 void ShenandoahHeap::stop_concurrent_marking() {
1442   assert(concurrent_mark_in_progress(), "How else could we get here?");
1443   if (! cancelled_concgc()) {
1444     // If we needed to update refs, and concurrent marking has been cancelled,
1445     // we need to finish updating references.
1446     set_need_update_refs(false);
1447   }
1448   set_concurrent_mark_in_progress(false);
1449 
1450   LogTarget(Trace, gc, region) lt;
1451   if (lt.is_enabled()) {
1452     ResourceMark rm;
1453     LogStream ls(lt);
1454     ls.print_cr("Regions at stopping the concurrent mark:");
1455     print_heap_regions_on(&ls);
1456   }
1457 }
1458 
1459 void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
1460   _concurrent_mark_in_progress = in_progress ? 1 : 0;
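  // Toggle SATB barriers for all Java threads: activate when marking starts, deactivate when it ends.
  // The second argument is the expected previous state, used for verification.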
1461   JavaThread::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
1462 }
1463 
1464 void ShenandoahHeap::set_evacuation_in_progress_concurrently(bool in_progress) {
1465   // Note: it is important to first release the _evacuation_in_progress flag here,
1466   // so that Java threads can get out of oom_during_evacuation() and reach a safepoint,
1467   // in case a VM task is pending.
1468   set_evacuation_in_progress(in_progress);
1469   MutexLocker mu(Threads_lock);
1470   JavaThread::set_evacuation_in_progress_all_threads(in_progress);
1471 }
1472 
1473 void ShenandoahHeap::set_evacuation_in_progress_at_safepoint(bool in_progress) {
1474   assert(SafepointSynchronize::is_at_safepoint(), "Only call this at safepoint");
1475   set_evacuation_in_progress(in_progress);
1476   JavaThread::set_evacuation_in_progress_all_threads(in_progress);
1477 }
1478 
1479 void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
1480   _evacuation_in_progress = in_progress ? 1 : 0;
1481   OrderAccess::fence();
1482 }
1483 
1484 void ShenandoahHeap::oom_during_evacuation() {
1485   log_develop_trace(gc)("Out of memory during evacuation: cancelling evacuation and scheduling full GC (thread %d)",
1486                         Thread::current()->osthread()->thread_id());
1487 
1488   // We ran out of memory during evacuation. Cancel evacuation, and schedule a full-GC.
1489   collector_policy()->set_should_clear_all_soft_refs(true);
1490   concurrent_thread()->try_set_full_gc();
1491   cancel_concgc(_oom_evacuation);
1492 
1493   if ((! Thread::current()->is_GC_task_thread()) && (! Thread::current()->is_ConcurrentGC_thread())) {
1494     assert(! Threads_lock->owned_by_self()
1495            || SafepointSynchronize::is_at_safepoint(), "must not hold Threads_lock here");
1496     log_warning(gc)("OOM during evacuation. Let the Java thread wait until evacuation finishes.");
1497     while (_evacuation_in_progress) { // wait.
1498       Thread::current()->_ParkEvent->park(1);
1499     }
1500   }
1501 
1502 }
1503 
1504 HeapWord* ShenandoahHeap::tlab_post_allocation_setup(HeapWord* obj) {
1505   // The object proper starts one fwdptr word past the raw allocation; initialize its Brooks pointer
1506   HeapWord* result = obj + BrooksPointer::word_size();
1507   BrooksPointer::initialize(oop(result));
1508   return result;
1509 }
1510 
1511 uint ShenandoahHeap::oop_extra_words() {
1512   return BrooksPointer::word_size();
1513 }
1514 
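// Is-alive closure that first resolves the object through its Brooks forwarding pointer,
// for use while references may still point at from-space copies.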
1515 ShenandoahForwardedIsAliveClosure::ShenandoahForwardedIsAliveClosure() :
1516   _heap(ShenandoahHeap::heap_no_check()) {
1517 }
1518 
1519 bool ShenandoahForwardedIsAliveClosure::do_object_b(oop obj) {
1520   assert(_heap != NULL, "sanity");
1521   obj = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
1522 #ifdef ASSERT
1523   if (_heap->concurrent_mark_in_progress()) {
1524     assert(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)), "only query to-space");
1525   }
1526 #endif
1527   assert(!oopDesc::is_null(obj), "null");
1528   return _heap->is_marked(obj);
1529 }
1530 
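// Is-alive closure for contexts where all visible references already point into to-space;
// consults the mark bitmap directly.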
1531 ShenandoahIsAliveClosure::ShenandoahIsAliveClosure() :
1532   _heap(ShenandoahHeap::heap_no_check()) {
1533 }
1534 
1535 bool ShenandoahIsAliveClosure::do_object_b(oop obj) {
1536   assert(_heap != NULL, "sanity");
1537   assert(!oopDesc::is_null(obj), "null");
1538   assert(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)), "only query to-space");
1539   return _heap->is_marked(obj);
1540 }
1541 
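// While references still need updating, liveness queries may arrive through from-space
// copies, so hand out the forwarding-aware closure; otherwise the plain one suffices.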
1542 BoolObjectClosure* ShenandoahHeap::is_alive_closure() {
1543   return need_update_refs() ?
1544          (BoolObjectClosure*) &_forwarded_is_alive :
1545          (BoolObjectClosure*) &_is_alive;
1546 }
1547 
1548 void ShenandoahHeap::ref_processing_init() {
1549   MemRegion mr = reserved_region();
1550 
1551   _forwarded_is_alive.init(ShenandoahHeap::heap());
1552   _is_alive.init(ShenandoahHeap::heap());
1553   assert(_max_workers > 0, "Sanity");
1554 
1555   _ref_processor =
1556     new ReferenceProcessor(mr,    // span
1557                            ParallelRefProcEnabled,  // MT processing
1558                            _max_workers,            // Degree of MT processing
1559                            true,                    // MT discovery
1560                            _max_workers,            // Degree of MT discovery
1561                            false,                   // Reference discovery is not atomic
1562                            &_forwarded_is_alive);   // Pessimistically assume "forwarded"
1563 }
1564 
1565 
1566 GCTracer* ShenandoahHeap::tracer() {
1567   return shenandoahPolicy()->tracer();
1568 }
1569 
1570 size_t ShenandoahHeap::tlab_used(Thread* thread) const {
1571   return _free_regions->used();
1572 }
1573 
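// Cancellation may be requested by several threads; only the one that actually flips the
// cancellation flag logs and reports it to the policy.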
1574 void ShenandoahHeap::cancel_concgc(GCCause::Cause cause) {
1575   if (try_cancel_concgc()) {
1576     log_info(gc)("Cancelling concurrent GC: %s", GCCause::to_string(cause));
1577     _shenandoah_policy->report_concgc_cancelled();
1578   }
1579 }
1580 
1581 void ShenandoahHeap::cancel_concgc(ShenandoahCancelCause cause) {
1582   if (try_cancel_concgc()) {
1583     log_info(gc)("Cancelling concurrent GC: %s", cancel_cause_to_string(cause));
1584     _shenandoah_policy->report_concgc_cancelled();
1585   }
1586 }
1587 
1588 const char* ShenandoahHeap::cancel_cause_to_string(ShenandoahCancelCause cause) {
1589   switch (cause) {
1590     case _oom_evacuation:
1591       return "Out of memory for evacuation";
1592     case _vm_stop:
1593       return "Stopping VM";
1594     default:
1595       return "Unknown";
1596   }
1597 }
1598 
1599 uint ShenandoahHeap::max_workers() {
1600   return _max_workers;
1601 }
1602 
1603 void ShenandoahHeap::stop() {
1604   // The shutdown sequence should be able to complete even while a GC cycle is running.
1605 
1606   // Step 0. Notify policy to disable event recording.
1607   _shenandoah_policy->record_shutdown();
1608 
1609   // Step 1. Notify control thread that we are in shutdown.
1610   // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown.
1611   // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
1612   _concurrent_gc_thread->prepare_for_graceful_shutdown();
1613 
1614   // Step 2. Notify GC workers that we are cancelling GC.
1615   cancel_concgc(_vm_stop);
1616 
1617   // Step 3. Wait until GC worker exits normally.
1618   _concurrent_gc_thread->stop();
1619 }
1620 
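// Class unloading and metadata/code cache cleanup. All work is timed under either the
// full-GC or the concurrent "purge" phase group, depending on the caller.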
1621 void ShenandoahHeap::unload_classes_and_cleanup_tables(bool full_gc) {
1622   ShenandoahPhaseTimings::Phase phase_root =
1623           full_gc ?
1624           ShenandoahPhaseTimings::full_gc_purge :
1625           ShenandoahPhaseTimings::purge;
1626 
1627   ShenandoahPhaseTimings::Phase phase_unload =
1628           full_gc ?
1629           ShenandoahPhaseTimings::full_gc_purge_class_unload :
1630           ShenandoahPhaseTimings::purge_class_unload;
1631 
1632   ShenandoahPhaseTimings::Phase phase_cldg =
1633           full_gc ?
1634           ShenandoahPhaseTimings::full_gc_purge_cldg :
1635           ShenandoahPhaseTimings::purge_cldg;
1636 
1637   ShenandoahPhaseTimings::Phase phase_par =
1638           full_gc ?
1639           ShenandoahPhaseTimings::full_gc_purge_par :
1640           ShenandoahPhaseTimings::purge_par;
1641 
1642   ShenandoahPhaseTimings::Phase phase_par_classes =
1643           full_gc ?
1644           ShenandoahPhaseTimings::full_gc_purge_par_classes :
1645           ShenandoahPhaseTimings::purge_par_classes;
1646 
1647   ShenandoahPhaseTimings::Phase phase_par_codecache =
1648           full_gc ?
1649           ShenandoahPhaseTimings::full_gc_purge_par_codecache :
1650           ShenandoahPhaseTimings::purge_par_codecache;
1651 
1652   ShenandoahPhaseTimings::Phase phase_par_rmt =
1653           full_gc ?
1654           ShenandoahPhaseTimings::full_gc_purge_par_rmt :
1655           ShenandoahPhaseTimings::purge_par_rmt;
1656 
1657   ShenandoahPhaseTimings::Phase phase_par_symbstring =
1658           full_gc ?
1659           ShenandoahPhaseTimings::full_gc_purge_par_symbstring :
1660           ShenandoahPhaseTimings::purge_par_symbstring;
1661 
1662   ShenandoahPhaseTimings::Phase phase_par_sync =
1663           full_gc ?
1664           ShenandoahPhaseTimings::full_gc_purge_par_sync :
1665           ShenandoahPhaseTimings::purge_par_sync;
1666 
1667   ShenandoahGCPhase root_phase(phase_root);
1668 
1669   BoolObjectClosure* is_alive = is_alive_closure();
1670 
1671   bool purged_class;
1672 
1673   // Unload classes and purge SystemDictionary.
1674   {
1675     ShenandoahGCPhase phase(phase_unload);
1676     purged_class = SystemDictionary::do_unloading(is_alive,
1677                                                   full_gc ? ShenandoahMarkCompact::gc_timer() : gc_timer(),
1678                                                   true);
1679   }
1680 
1681   {
1682     ShenandoahGCPhase phase(phase_par);
1683     uint active = _workers->active_workers();
1684     ParallelCleaningTask unlink_task(is_alive, true, true, active, purged_class);
1685     _workers->run_task(&unlink_task);
1686 
1687     ShenandoahPhaseTimings* p = ShenandoahHeap::heap()->phase_timings();
1688     ParallelCleaningTimes times = unlink_task.times();
1689 
1690     // "times" reports total time summed over all workers, while the phases record wall time.
1691     // Divide the totals by the number of active workers to approximate per-phase wall time.
1692     p->record_phase_time(phase_par_classes,    times.klass_work_us() / active);
1693     p->record_phase_time(phase_par_codecache,  times.codecache_work_us() / active);
1694     p->record_phase_time(phase_par_rmt,        times.rmt_work_us() / active);
1695     p->record_phase_time(phase_par_symbstring, times.tables_work_us() / active);
1696     p->record_phase_time(phase_par_sync,       times.sync_us() / active);
1697   }
1698 
1699   {
1700     ShenandoahGCPhase phase(phase_cldg);
1701     ClassLoaderDataGraph::purge();
1702   }
1703 }
1704 
1705 void ShenandoahHeap::set_need_update_refs(bool need_update_refs) {
1706   _need_update_refs = need_update_refs;
1707 }
1708 
1709 // FIXME: this should be in ShenandoahHeapRegionSet
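// Returns the region immediately following r, skipping humongous regions.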
1710 ShenandoahHeapRegion* ShenandoahHeap::next_compaction_region(const ShenandoahHeapRegion* r) {
1711   size_t region_idx = r->region_number() + 1;
1712   ShenandoahHeapRegion* next = _ordered_regions->get(region_idx);
1713   guarantee(next->region_number() == region_idx, "region number must match");
1714   while (next->is_humongous()) {
1715     region_idx = next->region_number() + 1;
1716     next = _ordered_regions->get(region_idx);
1717     guarantee(next->region_number() == region_idx, "region number must match");
1718   }
1719   return next;
1720 }
1721 
1722 ShenandoahMonitoringSupport* ShenandoahHeap::monitoring_support() {
1723   return _monitoring_support;
1724 }
1725 
1726 MarkBitMap* ShenandoahHeap::mark_bit_map() {
1727   return &_mark_bit_map;
1728 }
1729 
1730 void ShenandoahHeap::add_free_region(ShenandoahHeapRegion* r) {
1731   _free_regions->add_region(r);
1732 }
1733 
1734 void ShenandoahHeap::clear_free_regions() {
1735   _free_regions->clear();
1736 }
1737 
1738 address ShenandoahHeap::in_cset_fast_test_addr() {
1739   ShenandoahHeap* heap = ShenandoahHeap::heap();
1740   assert(heap->collection_set() != NULL, "Sanity");
1741   return (address) heap->collection_set()->biased_map_address();
1742 }
1743 
1744 address ShenandoahHeap::cancelled_concgc_addr() {
1745   return (address) &(ShenandoahHeap::heap()->_cancelled_concgc);
1746 }
1747 
1748 
1749 size_t ShenandoahHeap::conservative_max_heap_alignment() {
1750   return ShenandoahMaxRegionSize;
1751 }
1752 
1753 size_t ShenandoahHeap::bytes_allocated_since_cm() {
1754   return _bytes_allocated_since_cm;
1755 }
1756 
1757 void ShenandoahHeap::set_bytes_allocated_since_cm(size_t bytes) {
1758   _bytes_allocated_since_cm = bytes;
1759 }
1760 
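// Top-at-mark-start (TAMS) values are kept in a flat table indexed by region number,
// which is derived by shifting the region base address by the region size shift.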
1761 void ShenandoahHeap::set_top_at_mark_start(HeapWord* region_base, HeapWord* addr) {
1762   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_bytes_shift();
1763   _top_at_mark_starts[index] = addr;
1764 }
1765 
1766 HeapWord* ShenandoahHeap::top_at_mark_start(HeapWord* region_base) {
1767   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_bytes_shift();
1768   return _top_at_mark_starts[index];
1769 }
1770 
1771 void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
1772   _full_gc_in_progress = in_progress;
1773 }
1774 
1775 bool ShenandoahHeap::is_full_gc_in_progress() const {
1776   return _full_gc_in_progress;
1777 }
1778 
1779 void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
1780   _update_refs_in_progress = in_progress;
1781 }
1782 
1783 bool ShenandoahHeap::is_update_refs_in_progress() const {
1784   return _update_refs_in_progress;
1785 }
1786 
1787 void ShenandoahHeap::register_nmethod(nmethod* nm) {
1788   ShenandoahCodeRoots::add_nmethod(nm);
1789 }
1790 
1791 void ShenandoahHeap::unregister_nmethod(nmethod* nm) {
1792   ShenandoahCodeRoots::remove_nmethod(nm);
1793 }
1794 
1795 void ShenandoahHeap::pin_object(oop o) {
1796   ShenandoahHeapLocker locker(lock());
1797   heap_region_containing(o)->make_pinned();
1798 }
1799 
1800 void ShenandoahHeap::unpin_object(oop o) {
1801   ShenandoahHeapLocker locker(lock());
1802   heap_region_containing(o)->make_unpinned();
1803 }
1804 
1805 GCTimer* ShenandoahHeap::gc_timer() const {
1806   return _gc_timer;
1807 }
1808 
1809 #ifdef ASSERT
1810 void ShenandoahHeap::assert_gc_workers(uint nworkers) {
1811   assert(nworkers > 0 && nworkers <= max_workers(), "Sanity");
1812 
1813   if (SafepointSynchronize::is_at_safepoint()) {
1814     if (UseDynamicNumberOfGCThreads ||
1815         (FLAG_IS_DEFAULT(ParallelGCThreads) && ForceDynamicNumberOfGCThreads)) {
1816       assert(nworkers <= ParallelGCThreads, "Cannot use more GC threads than available");
1817     } else {
1818       // Use ParallelGCThreads inside safepoints
1819       assert(nworkers == ParallelGCThreads, "Use ParallelGCThreads within safepoints");
1820     }
1821   } else {
1822     if (UseDynamicNumberOfGCThreads ||
1823         (FLAG_IS_DEFAULT(ConcGCThreads) && ForceDynamicNumberOfGCThreads)) {
1824       assert(nworkers <= ConcGCThreads, "Cannot use more GC threads than available");
1825     } else {
1826       // Use ConcGCThreads outside safepoints
1827       assert(nworkers == ConcGCThreads, "Use ConcGCThreads outside safepoints");
1828     }
1829   }
1830 }
1831 #endif
1832 
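// Accumulates the garbage (dead bytes) over all regular regions.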
1833 class ShenandoahCountGarbageClosure : public ShenandoahHeapRegionClosure {
1834 private:
1835   size_t            _garbage;
1836 public:
1837   ShenandoahCountGarbageClosure() : _garbage(0) {
1838   }
1839 
1840   bool heap_region_do(ShenandoahHeapRegion* r) {
1841     if (r->is_regular()) {
1842       _garbage += r->garbage();
1843     }
1844     return false;
1845   }
1846 
1847   size_t garbage() {
1848     return _garbage;
1849   }
1850 };
1851 
1852 size_t ShenandoahHeap::garbage() {
1853   ShenandoahCountGarbageClosure cl;
1854   heap_region_iterate(&cl);
1855   return cl.garbage();
1856 }
1857 
1858 ShenandoahConnectionMatrix* ShenandoahHeap::connection_matrix() const {
1859   return _connection_matrix;
1860 }
1861 
1862 ShenandoahPartialGC* ShenandoahHeap::partial_gc() {
1863   return _partial_gc;
1864 }
1865 
1866 void ShenandoahHeap::do_partial_collection() {
1867   partial_gc()->do_partial_collection();
1868 }
1869 
1870 ShenandoahVerifier* ShenandoahHeap::verifier() {
1871   guarantee(ShenandoahVerify, "Should be enabled");
1872   assert (_verifier != NULL, "sanity");
1873   return _verifier;
1874 }
1875 
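// Worker task that updates heap references region by region. Collection set regions need
// no updates; instead their mark bitmap is cleared up to top-at-mark-start. All other
// active regions have their marked objects visited with the update closure.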
1876 template<class T>
1877 class ShenandoahUpdateHeapRefsTask : public AbstractGangTask {
1878 private:
1879   T cl;
1880   ShenandoahHeap* _heap;
1881   ShenandoahHeapRegionSet* _regions;
1882   bool _concurrent;
1883 public:
1884   ShenandoahUpdateHeapRefsTask(ShenandoahHeapRegionSet* regions, bool concurrent) :
1885     AbstractGangTask("Concurrent Update References Task"),
1886     cl(T()),
1887     _heap(ShenandoahHeap::heap()),
1888     _regions(regions),
1889     _concurrent(concurrent) {
1890   }
1891 
1892   void work(uint worker_id) {
1893     SuspendibleThreadSetJoiner stsj(_concurrent && ShenandoahSuspendibleWorkers);
1894     ShenandoahHeapRegion* r = _regions->claim_next();
1895     while (r != NULL) {
1896       if (_heap->in_collection_set(r)) {
1897         HeapWord* bottom = r->bottom();
1898         HeapWord* top = _heap->top_at_mark_start(r->bottom());
1899         if (top > bottom) {
1900           _heap->mark_bit_map()->clear_range_large(MemRegion(bottom, top));
1901         }
1902       } else {
1903         if (r->is_active()) {
1904           _heap->marked_object_oop_safe_iterate(r, &cl);
1905         }
1906       }
1907       if (_heap->check_cancelled_concgc_and_yield(_concurrent)) {
1908         return;
1909       }
1910       r = _regions->claim_next();
1911     }
1912   }
1913 };
1914 
1915 void ShenandoahHeap::update_heap_references(ShenandoahHeapRegionSet* update_regions, bool concurrent) {
1916   if (UseShenandoahMatrix) {
1917     ShenandoahUpdateHeapRefsTask<ShenandoahUpdateHeapRefsMatrixClosure> task(update_regions, concurrent);
1918     workers()->run_task(&task);
1919   } else {
1920     ShenandoahUpdateHeapRefsTask<ShenandoahUpdateHeapRefsClosure> task(update_regions, concurrent);
1921     workers()->run_task(&task);
1922   }
1923 }
1924 
1925 void ShenandoahHeap::concurrent_update_heap_references() {
1926   ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_update_refs);
1927   ShenandoahHeapRegionSet* update_regions = regions();
1928   update_regions->clear_current_index();
1929   update_heap_references(update_regions, true);
1930 }
1931 
1932 void ShenandoahHeap::prepare_update_refs() {
1933   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1934 
1935   if (ShenandoahVerify) {
1936     verifier()->verify_before_updaterefs();
1937   }
1938 
1939   set_evacuation_in_progress_at_safepoint(false);
1940   set_update_refs_in_progress(true);
1941   ensure_parsability(true);
1942   if (UseShenandoahMatrix) {
1943     connection_matrix()->clear_all();
1944   }
1945   for (size_t i = 0; i < num_regions(); i++) {
1946     ShenandoahHeapRegion* r = _ordered_regions->get(i);
1947     r->set_concurrent_iteration_safe_limit(r->top());
1948   }
1949 }
1950 
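// Final update-refs (at a safepoint): finish any reference updating left over from a
// cancelled concurrent phase, update the roots, then recycle the collection set and
// rebuild the free set.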
1951 void ShenandoahHeap::finish_update_refs() {
1952   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1953 
1954   if (cancelled_concgc()) {
1955     ShenandoahGCPhase final_work(ShenandoahPhaseTimings::final_update_refs_finish_work);
1956 
1957     // Finish updating references where we left off.
1958     clear_cancelled_concgc();
1959     ShenandoahHeapRegionSet* update_regions = regions();
1960     update_heap_references(update_regions, false);
1961   }
1962 
1963   assert(! cancelled_concgc(), "Should have been done right before");
1964   concurrentMark()->update_roots(ShenandoahPhaseTimings::final_update_refs_roots);
1965 
1966   if (ShenandoahStringDedup::is_enabled()) {
1967     ShenandoahGCPhase final_str_dedup_table(ShenandoahPhaseTimings::final_update_refs_dedup_table);
1968     ShenandoahStringDedup::parallel_update_refs();
1969   }
1970 
1971   // Allocations might have happened before we STWed here, record peak:
1972   shenandoahPolicy()->record_peak_occupancy();
1973 
1974   ShenandoahGCPhase final_update_refs(ShenandoahPhaseTimings::final_update_refs_recycle);
1975 
1976   trash_cset_regions();
1977   set_need_update_refs(false);
1978 
1979   if (ShenandoahVerify) {
1980     verifier()->verify_after_updaterefs();
1981   }
1982 
1983   {
1984     // Rebuild the free set
1985     ShenandoahHeapLocker locker(lock());
1986     _free_regions->clear();
1987     size_t end = _ordered_regions->active_regions();
1988     for (size_t i = 0; i < end; i++) {
1989       ShenandoahHeapRegion* r = _ordered_regions->get(i);
1990       if (r->is_alloc_allowed()) {
1991         assert (!in_collection_set(r), "collection set should be clear");
1992         _free_regions->add_region(r);
1993       }
1994     }
1995   }
1996   set_update_refs_in_progress(false);
1997 }
1998 
1999 void ShenandoahHeap::set_alloc_seq_gc_start() {
2000   // Take next number, the start seq number is inclusive
2001   _alloc_seq_at_last_gc_start = ShenandoahHeapRegion::alloc_seq_num() + 1;
2002 }
2003 
2004 void ShenandoahHeap::set_alloc_seq_gc_end() {
2005   // Take current number, the end seq number is also inclusive
2006   _alloc_seq_at_last_gc_end = ShenandoahHeapRegion::alloc_seq_num();
2007 }
2008 
2009 
2010 #ifdef ASSERT
2011 void ShenandoahHeap::assert_heaplock_owned_by_current_thread() {
2012   _lock.assert_owned_by_current_thread();
2013 }
2014 
2015 void ShenandoahHeap::assert_heaplock_not_owned_by_current_thread() {
2016   _lock.assert_not_owned_by_current_thread();
2017 }
2018 
2019 void ShenandoahHeap::assert_heaplock_or_safepoint() {
2020   _lock.assert_owned_by_current_thread_or_safepoint();
2021 }
2022 #endif
2023 
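// Recycles at most 'limit' trash regions back into the free set. The caller must already
// hold the heap lock.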
2024 void ShenandoahHeap::recycle_trash_assist(size_t limit) {
2025   assert_heaplock_owned_by_current_thread();
2026 
2027   size_t count = 0;
2028   for (size_t i = 0; (i < num_regions()) && (count < limit); i++) {
2029     ShenandoahHeapRegion *r = _ordered_regions->get(i);
2030     if (r->is_trash()) {
2031       decrease_used(r->used());
2032       r->recycle();
2033       _free_regions->add_region(r);
2034       count++;
2035     }
2036   }
2037 }
2038 
2039 void ShenandoahHeap::recycle_trash() {
2040   // The heap lock is not reentrant; make sure we do not already hold it.
2041   assert_heaplock_not_owned_by_current_thread();
2042 
2043   size_t bytes_reclaimed = 0;
2044 
2045   if (UseShenandoahMatrix) {
2046     // The complication with matrix cleanup is that we want to batch the matrix updates
2047     // to amortize their cost. We also cannot add regions to the free set until the matrix
2048     // is clean, otherwise we would race with allocations from those regions.
2049 
2050     size_t count = 0;
2051     for (size_t i = 0; i < num_regions(); i++) {
2052       ShenandoahHeapRegion* r = _ordered_regions->get(i);
2053       if (r->is_trash()) {
2054         ShenandoahHeapLocker locker(lock());
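        // Re-check under the heap lock: the region state may have changed since the
        // unlocked check above.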
2055         if (r->is_trash()) {
2056           bytes_reclaimed += r->used();
2057           decrease_used(r->used());
2058           r->recycle_no_matrix();
2059           _recycled_regions[count++] = r->region_number();
2060         }
2061       }
2062       SpinPause(); // allow allocators to barge the lock
2063     }
2064 
2065     connection_matrix()->clear_batched(_recycled_regions, count);
2066 
2067     {
2068       ShenandoahHeapLocker locker(lock());
2069       for (size_t i = 0; i < count; i++) {
2070         ShenandoahHeapRegion *r = _ordered_regions->get(_recycled_regions[i]);
2071         _free_regions->add_region(r);
2072       }
2073     }
2074 
2075   } else {
2076     for (size_t i = 0; i < num_regions(); i++) {
2077       ShenandoahHeapRegion* r = _ordered_regions->get(i);
2078       if (r->is_trash()) {
2079         ShenandoahHeapLocker locker(lock());
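        // Re-check under the heap lock: the region state may have changed since the
        // unlocked check above.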
2080         if (r->is_trash()) {
2081           bytes_reclaimed += r->used();
2082           decrease_used(r->used());
2083           r->recycle();
2084           _free_regions->add_region(r);
2085         }
2086       }
2087       SpinPause(); // allow allocators to barge the lock
2088     }
2089   }
2090 
2091   _shenandoah_policy->record_bytes_reclaimed(bytes_reclaimed);
2092 }
2093 
2094 void ShenandoahHeap::print_extended_on(outputStream *st) const {
2095   print_on(st);
2096   print_heap_regions_on(st);
2097 }
2098 
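// The mark bitmap is committed and uncommitted in per-region slices; the offset and length
// select the slice of the bitmap backing store that corresponds to the given region.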
2099 bool ShenandoahHeap::commit_bitmaps(ShenandoahHeapRegion* r) {
2100   size_t len = _bitmap_words_per_region * HeapWordSize;
2101   size_t off = r->region_number() * _bitmap_words_per_region;
2102   if (!os::commit_memory((char*)(_bitmap_region.start() + off), len, false)) {
2103     return false;
2104   }
2105   return true;
2106 }
2107 
2108 bool ShenandoahHeap::uncommit_bitmaps(ShenandoahHeapRegion* r) {
2109   size_t len = _bitmap_words_per_region * HeapWordSize;
2110   size_t off = r->region_number() * _bitmap_words_per_region;
2111   if (!os::uncommit_memory((char*)(_bitmap_region.start() + off), len)) {
2112     return false;
2113   }
2114   return true;
2115 }