
src/share/vm/gc_implementation/shenandoah/shenandoahHeap.cpp

rev 11156 : [backport] 8220350: Refactor ShenandoahHeap::initialize
Reviewed-by: rkennke, zgu
rev 11157 : [backport] 8220153: Shenandoah does not work with TransparentHugePages properly
Reviewed-by: rkennke, zgu
rev 11158 : [backport] 8220162: Shenandoah should not commit HugeTLBFS memory
Reviewed-by: rkennke, zgu


  62 #include "memory/metaspace.hpp"
  63 #include "runtime/vmThread.hpp"
  64 #include "services/mallocTracker.hpp"
  65 
  66 ShenandoahUpdateRefsClosure::ShenandoahUpdateRefsClosure() : _heap(ShenandoahHeap::heap()) {}
  67 
  68 #ifdef ASSERT
  69 template <class T>
  70 void ShenandoahAssertToSpaceClosure::do_oop_nv(T* p) {
  71   T o = oopDesc::load_heap_oop(p);
  72   if (! oopDesc::is_null(o)) {
  73     oop obj = oopDesc::decode_heap_oop_not_null(o);
  74     shenandoah_assert_not_forwarded(p, obj);
  75   }
  76 }
  77 
  78 void ShenandoahAssertToSpaceClosure::do_oop(narrowOop* p) { do_oop_nv(p); }
  79 void ShenandoahAssertToSpaceClosure::do_oop(oop* p)       { do_oop_nv(p); }
  80 #endif
  81 
  82 class ShenandoahPretouchTask : public AbstractGangTask {
  83 private:
  84   ShenandoahRegionIterator _regions;
  85   const size_t _bitmap_size;
  86   const size_t _page_size;
  87   char* _bitmap_base;
  88 public:
  89   ShenandoahPretouchTask(char* bitmap_base, size_t bitmap_size, size_t page_size) :
  90     AbstractGangTask("Shenandoah PreTouch"),
  91     _bitmap_size(bitmap_size),
  92     _page_size(page_size),
  93     _bitmap_base(bitmap_base) {}
  94 
  95   virtual void work(uint worker_id) {
  96     ShenandoahHeapRegion* r = _regions.next();
  97     while (r != NULL) {
  98       os::pretouch_memory((char*) r->bottom(), (char*) r->end());
  99 
 100       size_t start = r->region_number()       * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
 101       size_t end   = (r->region_number() + 1) * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
 102       assert (end <= _bitmap_size, err_msg("end is sane: " SIZE_FORMAT " <= " SIZE_FORMAT, end, _bitmap_size));
 103 
 104       os::pretouch_memory(_bitmap_base + start, _bitmap_base + end);
 105 
 106       r = _regions.next();
 107     }
 108   }
 109 };
 110 
 111 jint ShenandoahHeap::initialize() {
 112   CollectedHeap::pre_initialize();
 113 
 114   ShenandoahBrooksPointer::initial_checks();
 115 
 116   initialize_heuristics();
 117 
 118   size_t init_byte_size = collector_policy()->initial_heap_byte_size();
 119   size_t max_byte_size = collector_policy()->max_heap_byte_size();
 120   size_t heap_alignment = collector_policy()->heap_alignment();
 121 


 122   if (ShenandoahAlwaysPreTouch) {
 123     // Enabled pre-touch means the entire heap is committed right away.
 124     init_byte_size = max_byte_size;
 125   }
 126 
 127   Universe::check_alignment(max_byte_size,
 128                             ShenandoahHeapRegion::region_size_bytes(),
 129                             "shenandoah heap");
 130   Universe::check_alignment(init_byte_size,
 131                             ShenandoahHeapRegion::region_size_bytes(),
 132                             "shenandoah heap");
 133 
 134   ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
 135                                                  heap_alignment);
 136 
 137   _reserved.set_word_size(0);
 138   _reserved.set_start((HeapWord*)heap_rs.base());
 139   _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size()));
 140 
 141   set_barrier_set(new ShenandoahBarrierSet(this));
 142   ReservedSpace pgc_rs = heap_rs.first_part(max_byte_size);
 143 
 144   _num_regions = ShenandoahHeapRegion::region_count();
 145 
 146   size_t num_committed_regions = init_byte_size / ShenandoahHeapRegion::region_size_bytes();
 147   num_committed_regions = MIN2(num_committed_regions, _num_regions);
 148   assert(num_committed_regions <= _num_regions, "sanity");
 149 
 150   _initial_size = num_committed_regions * ShenandoahHeapRegion::region_size_bytes();
 151   _committed = _initial_size;
 152 
 153   log_info(gc, heap)("Initialize Shenandoah heap with initial size " SIZE_FORMAT "%s",
 154           byte_size_in_proper_unit(_initial_size), proper_unit_for_byte_size(_initial_size));
 155   if (!os::commit_memory(pgc_rs.base(), _initial_size, false)) {
 156     vm_exit_out_of_memory(_initial_size, OOM_MMAP_ERROR, "Shenandoah failed to initialize heap");
 157   }
 158 
 159   size_t reg_size_words = ShenandoahHeapRegion::region_size_words();
 160   size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();

 161 
 162   _regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC);
 163   _free_set = new ShenandoahFreeSet(this, _num_regions);
 164 
 165   _collection_set = new ShenandoahCollectionSet(this, (HeapWord*)pgc_rs.base());

 166 
 167   if (ShenandoahPacing) {
 168     _pacer = new ShenandoahPacer(this);
 169     _pacer->setup_for_idle();
 170   } else {
 171     _pacer = NULL;
 172   }
 173 
 174   assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
 175          err_msg("misaligned heap: " PTR_FORMAT, p2i(base())));
 176 
 177   // The call below uses stuff (the SATB* things) that is in G1, but probably
 178   // belongs in a shared location.
 179   JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
 180                                                SATB_Q_FL_lock,
 181                                                20 /*G1SATBProcessCompletedThreshold */,
 182                                                Shared_SATB_Q_lock);
 183 
 184   // Reserve space for prev and next bitmap.
 185   size_t bitmap_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
 186   _bitmap_size = MarkBitMap::compute_size(heap_rs.size());
 187   _bitmap_size = align_size_up(_bitmap_size, bitmap_page_size);
 188   _heap_region = MemRegion((HeapWord*) heap_rs.base(), heap_rs.size() / HeapWordSize);
 189 
 190   size_t bitmap_bytes_per_region = reg_size_bytes / MarkBitMap::heap_map_factor();
 191 
 192   guarantee(bitmap_bytes_per_region != 0,
 193             err_msg("Bitmap bytes per region should not be zero"));
 194   guarantee(is_power_of_2(bitmap_bytes_per_region),
 195             err_msg("Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region));
 196 
 197   if (bitmap_page_size > bitmap_bytes_per_region) {
 198     _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region;
 199     _bitmap_bytes_per_slice = bitmap_page_size;
 200   } else {
 201     _bitmap_regions_per_slice = 1;
 202     _bitmap_bytes_per_slice = bitmap_bytes_per_region;
 203   }
 204 
 205   guarantee(_bitmap_regions_per_slice >= 1,
 206             err_msg("Should have at least one region per slice: " SIZE_FORMAT,
 207                     _bitmap_regions_per_slice));
 208 
 209   guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0,
 210             err_msg("Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT,
 211                     _bitmap_bytes_per_slice, bitmap_page_size));
 212 
 213   ReservedSpace bitmap0(_bitmap_size, bitmap_page_size);
 214   MemTracker::record_virtual_memory_type(bitmap0.base(), mtGC);
 215   _bitmap_region = MemRegion((HeapWord*) bitmap0.base(), bitmap0.size() / HeapWordSize);

 216 
 217   size_t bitmap_init_commit = _bitmap_bytes_per_slice *
 218                               align_size_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
 219   bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
 220   os::commit_memory_or_exit((char *) (_bitmap_region.start()), bitmap_init_commit, false,
 221                             "couldn't allocate initial bitmap");


 222 
 223   size_t page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
 224 
 225   if (ShenandoahVerify) {
 226     ReservedSpace verify_bitmap(_bitmap_size, page_size);
 227     os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), false,
 228                               "couldn't allocate verification bitmap");


 229     MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
 230     MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
 231     _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
 232     _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
 233   }
 234 
 235   _marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap_region, _num_regions);
 236 
 237   {
 238     ShenandoahHeapLocker locker(lock());
 239     for (size_t i = 0; i < _num_regions; i++) {
 240       ShenandoahHeapRegion* r = new ShenandoahHeapRegion(this,
 241                                                          (HeapWord*) pgc_rs.base() + reg_size_words * i,
 242                                                          reg_size_words,
 243                                                          i,
 244                                                          i < num_committed_regions);
 245 
 246       _marking_context->initialize_top_at_mark_start(r);
 247       _regions[i] = r;
 248       assert(!collection_set()->is_in(i), "New region should not be in collection set");
 249     }
 250 
 251     // Initialize to complete
 252     _marking_context->mark_complete();
 253 
 254     _free_set->rebuild();
 255   }
 256 
 257   if (ShenandoahAlwaysPreTouch) {
 258     assert (!AlwaysPreTouch, "Should have been overridden");
 259 
 260     // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
 261     // before initialize() below zeroes it with the initializing thread. For any given region,
 262     // we touch the region and the corresponding bitmaps from the same thread.
 263     ShenandoahPushWorkerScope scope(workers(), _max_workers, false);
 264 
 265     log_info(gc, heap)("Parallel pretouch " SIZE_FORMAT " regions with " SIZE_FORMAT " byte pages",
 266                        _num_regions, page_size);
 267     ShenandoahPretouchTask cl(bitmap0.base(), _bitmap_size, page_size);
 268     _workers->run_task(&cl);
 269   }

 270 
 271   // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
 272   ReservedSpace aux_bitmap(_bitmap_size, bitmap_page_size);
 273   MemTracker::record_virtual_memory_type(aux_bitmap.base(), mtGC);
 274   _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize);
 275   _aux_bit_map.initialize(_heap_region, _aux_bitmap_region);
 276 
 277   _monitoring_support = new ShenandoahMonitoringSupport(this);
 278 
 279   _phase_timings = new ShenandoahPhaseTimings();


 280 
 281   if (ShenandoahAllocationTrace) {
 282     _alloc_tracker = new ShenandoahAllocTracker();
 283   }
 284 
 285   ShenandoahStringDedup::initialize();
 286 
 287   _control_thread = new ShenandoahControlThread();
 288 
 289   ShenandoahCodeRoots::initialize();
 290 
 291   _liveness_cache = NEW_C_HEAP_ARRAY(jushort*, _max_workers, mtGC);
 292   for (uint worker = 0; worker < _max_workers; worker++) {
 293     _liveness_cache[worker] = NEW_C_HEAP_ARRAY(jushort, _num_regions, mtGC);
 294     Copy::fill_to_bytes(_liveness_cache[worker], _num_regions * sizeof(jushort));
 295   }
 296 
 297   return JNI_OK;
 298 }
 299 
 300 #ifdef _MSC_VER
 301 #pragma warning( push )
 302 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
 303 #endif
 304 
 305 void ShenandoahHeap::initialize_heuristics() {
 306   if (ShenandoahGCHeuristics != NULL) {
 307     if (strcmp(ShenandoahGCHeuristics, "aggressive") == 0) {
 308       _heuristics = new ShenandoahAggressiveHeuristics();
 309     } else if (strcmp(ShenandoahGCHeuristics, "static") == 0) {
 310       _heuristics = new ShenandoahStaticHeuristics();
 311     } else if (strcmp(ShenandoahGCHeuristics, "adaptive") == 0) {
 312       _heuristics = new ShenandoahAdaptiveHeuristics();
 313     } else if (strcmp(ShenandoahGCHeuristics, "passive") == 0) {
 314       _heuristics = new ShenandoahPassiveHeuristics();
 315     } else if (strcmp(ShenandoahGCHeuristics, "compact") == 0) {


 321     if (_heuristics->is_diagnostic() && !UnlockDiagnosticVMOptions) {
 322       vm_exit_during_initialization(
 323               err_msg("Heuristics \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
 324                       _heuristics->name()));
 325     }
 326     if (_heuristics->is_experimental() && !UnlockExperimentalVMOptions) {
 327       vm_exit_during_initialization(
 328               err_msg("Heuristics \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
 329                       _heuristics->name()));
 330     }
 331     log_info(gc, init)("Shenandoah heuristics: %s",
 332                        _heuristics->name());
 333   } else {
 334     ShouldNotReachHere();
 335   }
 336 }
 337 
 338 ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
 339   SharedHeap(policy),
 340   _shenandoah_policy(policy),

 341   _regions(NULL),
 342   _free_set(NULL),
 343   _collection_set(NULL),
 344   _update_refs_iterator(this),
 345   _bytes_allocated_since_gc_start(0),
 346   _max_workers((uint)MAX2(ConcGCThreads, ParallelGCThreads)),
 347   _ref_processor(NULL),
 348   _marking_context(NULL),
 349   _aux_bit_map(),
 350   _verifier(NULL),
 351   _pacer(NULL),
 352   _gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
 353   _phase_timings(NULL),
 354   _alloc_tracker(NULL)
 355 {
 356   log_info(gc, init)("GC threads: " UINTX_FORMAT " parallel, " UINTX_FORMAT " concurrent", ParallelGCThreads, ConcGCThreads);
 357   log_info(gc, init)("Reference processing: %s", ParallelRefProcEnabled ? "parallel" : "serial");
 358 
 359   _scm = new ShenandoahConcurrentMark();
 360   _full_gc = new ShenandoahMarkCompact();
 361   _used = 0;
 362 
 363   _max_workers = MAX2(_max_workers, 1U);
 364   _workers = new ShenandoahWorkGang("Shenandoah GC Threads", _max_workers,
 365                             /* are_GC_task_threads */true,
 366                             /* are_ConcurrentGC_threads */false);
 367   if (_workers == NULL) {
 368     vm_exit_during_initialization("Failed necessary allocation.");


1128   // No-op.
1129 }
1130 
1131 /*
1132  * Iterates objects in the heap. This is public API, used for, e.g., heap dumping.
1133  *
1134  * We cannot safely iterate objects by doing a linear scan at random points in time. Linear
1135  * scanning needs to deal with dead objects, which may have dead Klass* pointers (e.g.
1136  * calling oopDesc::size() would crash) or dangling reference fields (crashes) etc. Linear
1137  * scanning therefore depends on having a valid marking bitmap to support it. However, we only
1138  * have a valid marking bitmap after successful marking. In particular, we *don't* have a valid
1139  * marking bitmap during marking, after aborted marking or during/after cleanup (when we just
1140  * wiped the bitmap in preparation for next marking).
1141  *
1142  * For all those reasons, we implement object iteration as a single marking traversal, reporting
1143  * objects as we mark+traverse through the heap, starting from GC roots. JVMTI IterateThroughHeap
1144  * is allowed to report dead objects, but is not required to do so.
1145  */
1146 void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
1147   assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1148   if (!os::commit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), false)) {
1149     log_warning(gc)("Could not commit native memory for auxiliary marking bitmap for heap iteration");
1150     return;
1151   }
1152 
1153   // Reset bitmap
1154   _aux_bit_map.clear();
1155 
1156   Stack<oop,mtGC> oop_stack;
1157 
1158   // First, we process all GC roots. This populates the work stack with initial objects.
1159   ShenandoahRootProcessor rp(this, 1, ShenandoahPhaseTimings::_num_phases);
1160   ObjectIterateScanRootClosure oops(&_aux_bit_map, &oop_stack);
1161   CLDToOopClosure clds(&oops, false);
1162   CodeBlobToOopClosure blobs(&oops, false);
1163   rp.process_all_roots(&oops, &oops, &clds, &blobs, NULL, 0);
1164 
1165   // Work through the oop stack to traverse heap.
1166   while (! oop_stack.is_empty()) {
1167     oop obj = oop_stack.pop();
1168     assert(obj->is_oop(), "must be a valid oop");
1169     cl->do_object(obj);
1170     obj->oop_iterate(&oops);
1171   }
1172 
1173   assert(oop_stack.is_empty(), "should be empty");
1174 
1175   if (!os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size())) {
1176     log_warning(gc)("Could not uncommit native memory for auxiliary marking bitmap for heap iteration");
1177   }
1178 }
1179 
1180 void ShenandoahHeap::safe_object_iterate(ObjectClosure* cl) {
1181   assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1182   object_iterate(cl);
1183 }
1184 
1185 void ShenandoahHeap::oop_iterate(ExtendedOopClosure* cl) {
1186   ObjectToOopClosure cl2(cl);
1187   object_iterate(&cl2);
1188 }
1189 
1190 class ShenandoahSpaceClosureRegionClosure: public ShenandoahHeapRegionClosure {
1191   SpaceClosure* _cl;
1192 public:
1193   ShenandoahSpaceClosureRegionClosure(SpaceClosure* cl) : _cl(cl) {}
1194   void heap_region_do(ShenandoahHeapRegion* r) {
1195     _cl->do_space(r);


2081 }
2082 
2083 bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
2084   size_t slice = r->region_number() / _bitmap_regions_per_slice;
2085 
2086   size_t regions_from = _bitmap_regions_per_slice * slice;
2087   size_t regions_to   = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1));
2088   for (size_t g = regions_from; g < regions_to; g++) {
2089     assert (g / _bitmap_regions_per_slice == slice, "same slice");
2090     if (skip_self && g == r->region_number()) continue;
2091     if (get_region(g)->is_committed()) {
2092       return true;
2093     }
2094   }
2095   return false;
2096 }
2097 
2098 bool ShenandoahHeap::commit_bitmap_slice(ShenandoahHeapRegion* r) {
2099   assert_heaplock_owned_by_current_thread();
2100 
2101   if (is_bitmap_slice_committed(r, true)) {
2102     // Some other region from the group is already committed, meaning the bitmap
2103     // slice is already committed, so we exit right away.
2104     return true;
2105   }
2106 
2107   // Commit the bitmap slice:
2108   size_t slice = r->region_number() / _bitmap_regions_per_slice;
2109   size_t off = _bitmap_bytes_per_slice * slice;
2110   size_t len = _bitmap_bytes_per_slice;
2111   if (!os::commit_memory((char*)_bitmap_region.start() + off, len, false)) {
2112     return false;
2113   }
2114   return true;
2115 }
2116 
2117 bool ShenandoahHeap::uncommit_bitmap_slice(ShenandoahHeapRegion *r) {
2118   assert_heaplock_owned_by_current_thread();
2119 
2120   if (is_bitmap_slice_committed(r, true)) {
2121     // Some other region from the group is still committed, meaning the bitmap
2122     // slice should stay committed, so exit right away.
2123     return true;
2124   }
2125 
2126   // Uncommit the bitmap slice:
2127   size_t slice = r->region_number() / _bitmap_regions_per_slice;
2128   size_t off = _bitmap_bytes_per_slice * slice;
2129   size_t len = _bitmap_bytes_per_slice;
2130   if (!os::uncommit_memory((char*)_bitmap_region.start() + off, len)) {
2131     return false;
2132   }
2133   return true;
2134 }
2135 
2136 void ShenandoahHeap::vmop_entry_init_mark() {
2137   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2138   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2139   ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_mark_gross);


2522 const char* ShenandoahHeap::degen_event_message(ShenandoahDegenPoint point) const {
2523   switch (point) {
2524     case _degenerated_unset:
2525       return "Pause Degenerated GC (<UNSET>)";
2526     case _degenerated_outside_cycle:
2527       return "Pause Degenerated GC (Outside of Cycle)";
2528     case _degenerated_mark:
2529       return "Pause Degenerated GC (Mark)";
2530     case _degenerated_evac:
2531       return "Pause Degenerated GC (Evacuation)";
2532     case _degenerated_updaterefs:
2533       return "Pause Degenerated GC (Update Refs)";
2534     default:
2535       ShouldNotReachHere();
2536       return "ERROR";
2537   }
2538 }
2539 
2540 jushort* ShenandoahHeap::get_liveness_cache(uint worker_id) {
2541 #ifdef ASSERT

2542   assert(worker_id < _max_workers, "sanity");
2543   for (uint i = 0; i < num_regions(); i++) {
2544     assert(_liveness_cache[worker_id][i] == 0, "liveness cache should be empty");
2545   }
2546 #endif
2547   return _liveness_cache[worker_id];
2548 }
2549 
2550 void ShenandoahHeap::flush_liveness_cache(uint worker_id) {
2551   assert(worker_id < _max_workers, "sanity");

2552   jushort* ld = _liveness_cache[worker_id];
2553   for (uint i = 0; i < num_regions(); i++) {
2554     ShenandoahHeapRegion* r = get_region(i);
2555     jushort live = ld[i];
2556     if (live > 0) {
2557       r->increase_live_data_gc_words(live);
2558       ld[i] = 0;
2559     }
2560   }
2561 }
2562 
2563 BoolObjectClosure* ShenandoahIsAliveSelector::is_alive_closure() {
2564   return ShenandoahHeap::heap()->has_forwarded_objects() ? reinterpret_cast<BoolObjectClosure*>(&_fwd_alive_cl)
2565                                                          : reinterpret_cast<BoolObjectClosure*>(&_alive_cl);
2566 }


  62 #include "memory/metaspace.hpp"
  63 #include "runtime/vmThread.hpp"
  64 #include "services/mallocTracker.hpp"
  65 
  66 ShenandoahUpdateRefsClosure::ShenandoahUpdateRefsClosure() : _heap(ShenandoahHeap::heap()) {}
  67 
  68 #ifdef ASSERT
  69 template <class T>
  70 void ShenandoahAssertToSpaceClosure::do_oop_nv(T* p) {
  71   T o = oopDesc::load_heap_oop(p);
  72   if (! oopDesc::is_null(o)) {
  73     oop obj = oopDesc::decode_heap_oop_not_null(o);
  74     shenandoah_assert_not_forwarded(p, obj);
  75   }
  76 }
  77 
  78 void ShenandoahAssertToSpaceClosure::do_oop(narrowOop* p) { do_oop_nv(p); }
  79 void ShenandoahAssertToSpaceClosure::do_oop(oop* p)       { do_oop_nv(p); }
  80 #endif
  81 
  82 class ShenandoahPretouchHeapTask : public AbstractGangTask {
  83 private:
  84   ShenandoahRegionIterator _regions;

  85   const size_t _page_size;

  86 public:
  87   ShenandoahPretouchHeapTask(size_t page_size) :
  88     AbstractGangTask("Shenandoah Pretouch Heap"),
  89     _page_size(page_size) {}


  90 
  91   virtual void work(uint worker_id) {
  92     ShenandoahHeapRegion* r = _regions.next();
  93     while (r != NULL) {
  94       os::pretouch_memory((char*) r->bottom(), (char*) r->end());
  95       r = _regions.next();
  96     }
  97   }
  98 };
  99 
 100 class ShenandoahPretouchBitmapTask : public AbstractGangTask {
 101 private:
 102   ShenandoahRegionIterator _regions;
 103   char* _bitmap_base;
 104   const size_t _bitmap_size;
 105   const size_t _page_size;
 106 public:
 107   ShenandoahPretouchBitmapTask(char* bitmap_base, size_t bitmap_size, size_t page_size) :
 108     AbstractGangTask("Shenandoah Pretouch Bitmap"),
 109     _bitmap_base(bitmap_base),
 110     _bitmap_size(bitmap_size),
 111     _page_size(page_size) {}
 112 
 113   virtual void work(uint worker_id) {
 114     ShenandoahHeapRegion* r = _regions.next();
 115     while (r != NULL) {
 116       size_t start = r->region_number()       * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
 117       size_t end   = (r->region_number() + 1) * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
 118       assert (end <= _bitmap_size, err_msg("end is sane: " SIZE_FORMAT " <= " SIZE_FORMAT, end, _bitmap_size));
 119 
 120       os::pretouch_memory(_bitmap_base + start, _bitmap_base + end);
 121 
 122       r = _regions.next();
 123     }
 124   }
 125 };
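
The start/end arithmetic in ShenandoahPretouchBitmapTask::work() maps each region onto a disjoint window of the mark bitmap. Here is a minimal standalone sketch of that computation, assuming illustrative values of 2 MiB regions and a heap_map_factor of 64 (one bitmap byte per 64 heap bytes); neither number is read from this build:

#include <cstdio>
#include <cstddef>

int main() {
  // Assumed example values, not taken from the file above.
  const size_t region_size_bytes = 2 * 1024 * 1024;  // 2 MiB regions
  const size_t heap_map_factor   = 64;               // heap bytes per bitmap byte

  for (size_t region_number = 0; region_number < 3; region_number++) {
    // Same arithmetic as the task above:
    size_t start = region_number       * region_size_bytes / heap_map_factor;
    size_t end   = (region_number + 1) * region_size_bytes / heap_map_factor;
    printf("region %zu -> bitmap bytes [%zu, %zu)\n", region_number, start, end);
  }
  return 0;
}

With these numbers each region touches its own 32 KiB bitmap window, which is why the pretouch workers need no synchronization.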
 126 
 127 jint ShenandoahHeap::initialize() {
 128   CollectedHeap::pre_initialize();
 129 
 130   ShenandoahBrooksPointer::initial_checks();
 131 
 132   initialize_heuristics();
 133 
 134   //
 135   // Figure out heap sizing
 136   //
 137 
 138   size_t init_byte_size = collector_policy()->initial_heap_byte_size();
 139   size_t max_byte_size  = collector_policy()->max_heap_byte_size();
 140   size_t heap_alignment = collector_policy()->heap_alignment();
 141 
 142   size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();
 143 
 144   if (ShenandoahAlwaysPreTouch) {
 145     // Enabled pre-touch means the entire heap is committed right away.
 146     init_byte_size = max_byte_size;
 147   }
 148 
 149   Universe::check_alignment(max_byte_size,  reg_size_bytes, "Shenandoah heap");
 150   Universe::check_alignment(init_byte_size, reg_size_bytes, "Shenandoah heap");
 151 
 152   _num_regions = ShenandoahHeapRegion::region_count();
 153 
 154   size_t num_committed_regions = init_byte_size / reg_size_bytes;
 155   num_committed_regions = MIN2(num_committed_regions, _num_regions);
 156   assert(num_committed_regions <= _num_regions, "sanity");
 157 
 158   _initial_size = num_committed_regions * reg_size_bytes;
 159   _committed = _initial_size;
 160 
 161   size_t heap_page_size   = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
 162   size_t bitmap_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
 163 
 164   //
 165   // Reserve and commit memory for heap
 166   //
 167 
 168   ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size, heap_alignment);
 169   _reserved.set_word_size(0);
 170   _reserved.set_start((HeapWord*)heap_rs.base());
 171   _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size()));
 172   _heap_region = MemRegion((HeapWord*)heap_rs.base(), heap_rs.size() / HeapWordSize);
 173   _heap_region_special = heap_rs.special();
 174 
 175   assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
 176          err_msg("Misaligned heap: " PTR_FORMAT, p2i(base())));
 177 
 178   ReservedSpace sh_rs = heap_rs.first_part(max_byte_size);
 179   if (!_heap_region_special) {
 180     os::commit_memory_or_exit(sh_rs.base(), _initial_size, heap_alignment, false,
 181                               "Cannot commit heap memory");

 182   }
 183 
 184   //
 185   // Reserve and commit memory for bitmap(s)
 186   //
 187 


 188   _bitmap_size = MarkBitMap::compute_size(heap_rs.size());
 189   _bitmap_size = align_size_up(_bitmap_size, bitmap_page_size);

 190 
 191   size_t bitmap_bytes_per_region = reg_size_bytes / MarkBitMap::heap_map_factor();
 192 
 193   guarantee(bitmap_bytes_per_region != 0,
 194             err_msg("Bitmap bytes per region should not be zero"));
 195   guarantee(is_power_of_2(bitmap_bytes_per_region),
 196             err_msg("Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region));
 197 
 198   if (bitmap_page_size > bitmap_bytes_per_region) {
 199     _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region;
 200     _bitmap_bytes_per_slice = bitmap_page_size;
 201   } else {
 202     _bitmap_regions_per_slice = 1;
 203     _bitmap_bytes_per_slice = bitmap_bytes_per_region;
 204   }
 205 
 206   guarantee(_bitmap_regions_per_slice >= 1,
 207             err_msg("Should have at least one region per slice: " SIZE_FORMAT,
 208                     _bitmap_regions_per_slice));
 209 
 210   guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0,
 211             err_msg("Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT,
 212                     _bitmap_bytes_per_slice, bitmap_page_size));
 213 
 214   ReservedSpace bitmap(_bitmap_size, bitmap_page_size);
 215   MemTracker::record_virtual_memory_type(bitmap.base(), mtGC);
 216   _bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize);
 217   _bitmap_region_special = bitmap.special();
 218 
 219   size_t bitmap_init_commit = _bitmap_bytes_per_slice *
 220                               align_size_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
 221   bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
 222   if (!_bitmap_region_special) {
 223     os::commit_memory_or_exit((char *) _bitmap_region.start(), bitmap_init_commit, bitmap_page_size, false,
 224                               "Cannot commit bitmap memory");
 225   }
 226 
 227   _marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap_region, _num_regions);
 228 
 229   if (ShenandoahVerify) {
 230     ReservedSpace verify_bitmap(_bitmap_size, bitmap_page_size);
 231     if (!verify_bitmap.special()) {
 232       os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), bitmap_page_size, false,
 233                                 "Cannot commit verification bitmap memory");
 234     }
 235     MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
 236     MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
 237     _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
 238     _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
 239   }
 240 
 241   // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
 242   ReservedSpace aux_bitmap(_bitmap_size, bitmap_page_size);
 243   MemTracker::record_virtual_memory_type(aux_bitmap.base(), mtGC);
 244   _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize);
 245   _aux_bitmap_region_special = aux_bitmap.special();
 246   _aux_bit_map.initialize(_heap_region, _aux_bitmap_region);
 247 
 248   //
 249   // Create regions and region sets
 250   //
 251 
 252   _regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC);
 253   _free_set = new ShenandoahFreeSet(this, _num_regions);
 254   _collection_set = new ShenandoahCollectionSet(this, (HeapWord*)sh_rs.base());
 255 
 256   {
 257     ShenandoahHeapLocker locker(lock());
 258 
 259     size_t size_words = ShenandoahHeapRegion::region_size_words();
 260 
 261     for (size_t i = 0; i < _num_regions; i++) {
 262       HeapWord* start = (HeapWord*)sh_rs.base() + size_words * i;
 263       bool is_committed = i < num_committed_regions;
 264       ShenandoahHeapRegion* r = new ShenandoahHeapRegion(this, start, size_words, i, is_committed);


 265 
 266       _marking_context->initialize_top_at_mark_start(r);
 267       _regions[i] = r;
 268       assert(!collection_set()->is_in(i), "New region should not be in collection set");
 269     }
 270 
 271     // Initialize to complete
 272     _marking_context->mark_complete();
 273 
 274     _free_set->rebuild();
 275   }
 276 
 277   if (ShenandoahAlwaysPreTouch) {
 278     assert(!AlwaysPreTouch, "Should have been overridden");
 279 
 280     // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
 281     // before initialize() below zeroes it with the initializing thread. For any given region,
 282     // we touch the region and the corresponding bitmaps from the same thread.
 283     ShenandoahPushWorkerScope scope(workers(), _max_workers, false);
 284 
 285     size_t pretouch_heap_page_size = heap_page_size;
 286     size_t pretouch_bitmap_page_size = bitmap_page_size;
 287 
 288 #ifdef LINUX
 289     // UseTransparentHugePages would madvise that backing memory can be coalesced into huge
 290     // pages. But the kernel needs to know that every small page is used in order to coalesce
 291     // them into a huge one. Therefore, we need to pretouch with smaller pages.
 292     if (UseTransparentHugePages) {
 293       pretouch_heap_page_size = (size_t)os::vm_page_size();
 294       pretouch_bitmap_page_size = (size_t)os::vm_page_size();
 295     }
 296 #endif
 297 
 298     // OS memory managers may want to coalesce back-to-back pages. Make their jobs
 299     // simpler by pre-touching contiguous spaces (heap and bitmap) separately.
 300 
 301     log_info(gc, init)("Pretouch bitmap: " SIZE_FORMAT " regions, " SIZE_FORMAT " byte pages",
 302                        _num_regions, pretouch_bitmap_page_size);
 303     ShenandoahPretouchBitmapTask bcl(bitmap.base(), _bitmap_size, pretouch_bitmap_page_size);
 304     _workers->run_task(&bcl);
 305 
 306     log_info(gc, init)("Pretouch heap: " SIZE_FORMAT " regions, " SIZE_FORMAT " byte pages",
 307                        _num_regions, pretouch_heap_page_size);
 308     ShenandoahPretouchHeapTask hcl(pretouch_heap_page_size);
 309     _workers->run_task(&hcl);
 310   }
 311 
 312   //
 313   // Initialize the rest of GC subsystems
 314   //
 315 
 316   set_barrier_set(new ShenandoahBarrierSet(this));
 317 
 318   _liveness_cache = NEW_C_HEAP_ARRAY(jushort*, _max_workers, mtGC);
 319   for (uint worker = 0; worker < _max_workers; worker++) {
 320     _liveness_cache[worker] = NEW_C_HEAP_ARRAY(jushort, _num_regions, mtGC);
 321     Copy::fill_to_bytes(_liveness_cache[worker], _num_regions * sizeof(jushort));
 322   }
 323 
 324   // The call below uses stuff (the SATB* things) that is in G1, but probably
 325   // belongs in a shared location.
 326   JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
 327                                                SATB_Q_FL_lock,
 328                                                20 /*G1SATBProcessCompletedThreshold */,
 329                                                Shared_SATB_Q_lock);
 330 
 331   _monitoring_support = new ShenandoahMonitoringSupport(this);
 332   _phase_timings = new ShenandoahPhaseTimings();
 333   ShenandoahStringDedup::initialize();
 334   ShenandoahCodeRoots::initialize();
 335 
 336   if (ShenandoahAllocationTrace) {
 337     _alloc_tracker = new ShenandoahAllocTracker();
 338   }
 339 
 340   if (ShenandoahPacing) {
 341     _pacer = new ShenandoahPacer(this);
 342     _pacer->setup_for_idle();
 343   } else {
 344     _pacer = NULL;
 345   }
 346 
 347   _control_thread = new ShenandoahControlThread();
 348 
 349   log_info(gc, init)("Initialize Shenandoah heap with initial size " SIZE_FORMAT "%s",
 350                      byte_size_in_proper_unit(_initial_size), proper_unit_for_byte_size(_initial_size));
 351 
 352   return JNI_OK;
 353 }
 354 
 355 #ifdef _MSC_VER
 356 #pragma warning( push )
 357 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
 358 #endif
 359 
 360 void ShenandoahHeap::initialize_heuristics() {
 361   if (ShenandoahGCHeuristics != NULL) {
 362     if (strcmp(ShenandoahGCHeuristics, "aggressive") == 0) {
 363       _heuristics = new ShenandoahAggressiveHeuristics();
 364     } else if (strcmp(ShenandoahGCHeuristics, "static") == 0) {
 365       _heuristics = new ShenandoahStaticHeuristics();
 366     } else if (strcmp(ShenandoahGCHeuristics, "adaptive") == 0) {
 367       _heuristics = new ShenandoahAdaptiveHeuristics();
 368     } else if (strcmp(ShenandoahGCHeuristics, "passive") == 0) {
 369       _heuristics = new ShenandoahPassiveHeuristics();
 370     } else if (strcmp(ShenandoahGCHeuristics, "compact") == 0) {


 376     if (_heuristics->is_diagnostic() && !UnlockDiagnosticVMOptions) {
 377       vm_exit_during_initialization(
 378               err_msg("Heuristics \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
 379                       _heuristics->name()));
 380     }
 381     if (_heuristics->is_experimental() && !UnlockExperimentalVMOptions) {
 382       vm_exit_during_initialization(
 383               err_msg("Heuristics \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
 384                       _heuristics->name()));
 385     }
 386     log_info(gc, init)("Shenandoah heuristics: %s",
 387                        _heuristics->name());
 388   } else {
 389     ShouldNotReachHere();
 390   }
 391 }
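
For reference, the name matched by the strcmp chain above comes from the ShenandoahGCHeuristics command-line flag, and the is_diagnostic()/is_experimental() checks tie it to the usual unlock flags. A typical invocation might look like the lines below; which heuristics require which unlock flag depends on how each class is tagged in this build:

java -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=compact ...
java -XX:+UnlockDiagnosticVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=passive ...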
 392 
 393 ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
 394   SharedHeap(policy),
 395   _shenandoah_policy(policy),
 396   _heap_region_special(false),
 397   _regions(NULL),
 398   _free_set(NULL),
 399   _collection_set(NULL),
 400   _update_refs_iterator(this),
 401   _bytes_allocated_since_gc_start(0),
 402   _max_workers((uint)MAX2(ConcGCThreads, ParallelGCThreads)),
 403   _ref_processor(NULL),
 404   _marking_context(NULL),
 405   _bitmap_size(0),
 406   _bitmap_regions_per_slice(0),
 407   _bitmap_bytes_per_slice(0),
 408   _bitmap_region_special(false),
 409   _aux_bitmap_region_special(false),
 410   _liveness_cache(NULL),
 411   _aux_bit_map(),
 412   _verifier(NULL),
 413   _pacer(NULL),
 414   _gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
 415   _phase_timings(NULL),
 416   _alloc_tracker(NULL)
 417 {
 418   log_info(gc, init)("GC threads: " UINTX_FORMAT " parallel, " UINTX_FORMAT " concurrent", ParallelGCThreads, ConcGCThreads);
 419   log_info(gc, init)("Reference processing: %s", ParallelRefProcEnabled ? "parallel" : "serial");
 420 
 421   _scm = new ShenandoahConcurrentMark();
 422   _full_gc = new ShenandoahMarkCompact();
 423   _used = 0;
 424 
 425   _max_workers = MAX2(_max_workers, 1U);
 426   _workers = new ShenandoahWorkGang("Shenandoah GC Threads", _max_workers,
 427                             /* are_GC_task_threads */true,
 428                             /* are_ConcurrentGC_threads */false);
 429   if (_workers == NULL) {
 430     vm_exit_during_initialization("Failed necessary allocation.");


1190   // No-op.
1191 }
1192 
1193 /*
1194  * Iterates objects in the heap. This is public API, used for, e.g., heap dumping.
1195  *
1196  * We cannot safely iterate objects by doing a linear scan at random points in time. Linear
1197  * scanning needs to deal with dead objects, which may have dead Klass* pointers (e.g.
1198  * calling oopDesc::size() would crash) or dangling reference fields (crashes) etc. Linear
1199  * scanning therefore depends on having a valid marking bitmap to support it. However, we only
1200  * have a valid marking bitmap after successful marking. In particular, we *don't* have a valid
1201  * marking bitmap during marking, after aborted marking or during/after cleanup (when we just
1202  * wiped the bitmap in preparation for next marking).
1203  *
1204  * For all those reasons, we implement object iteration as a single marking traversal, reporting
1205  * objects as we mark+traverse through the heap, starting from GC roots. JVMTI IterateThroughHeap
1206  * is allowed to report dead objects, but is not required to do so.
1207  */
1208 void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
1209   assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1210   if (!_aux_bitmap_region_special && !os::commit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), false)) {
1211     log_warning(gc)("Could not commit native memory for auxiliary marking bitmap for heap iteration");
1212     return;
1213   }
1214 
1215   // Reset bitmap
1216   _aux_bit_map.clear();
1217 
1218   Stack<oop,mtGC> oop_stack;
1219 
1220   // First, we process all GC roots. This populates the work stack with initial objects.
1221   ShenandoahRootProcessor rp(this, 1, ShenandoahPhaseTimings::_num_phases);
1222   ObjectIterateScanRootClosure oops(&_aux_bit_map, &oop_stack);
1223   CLDToOopClosure clds(&oops, false);
1224   CodeBlobToOopClosure blobs(&oops, false);
1225   rp.process_all_roots(&oops, &oops, &clds, &blobs, NULL, 0);
1226 
1227   // Work through the oop stack to traverse heap.
1228   while (! oop_stack.is_empty()) {
1229     oop obj = oop_stack.pop();
1230     assert(obj->is_oop(), "must be a valid oop");
1231     cl->do_object(obj);
1232     obj->oop_iterate(&oops);
1233   }
1234 
1235   assert(oop_stack.is_empty(), "should be empty");
1236 
1237   if (!_aux_bitmap_region_special && !os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size())) {
1238     log_warning(gc)("Could not uncommit native memory for auxiliary marking bitmap for heap iteration");
1239   }
1240 }
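
As a usage illustration of the closure pattern object_iterate() follows, here is a small standalone model; ObjectClosure below is a stand-in with the same shape as the HotSpot class, not the real declaration, and the traversal is simulated:

#include <cstdio>
#include <cstddef>

// Stand-in for HotSpot's ObjectClosure: one callback per reachable object.
class ObjectClosure {
public:
  virtual ~ObjectClosure() {}
  virtual void do_object(void* obj) = 0;
};

// A caller such as a heap dumper passes a closure like this to
// object_iterate() at a safepoint; here we only count the callbacks.
class CountingClosure : public ObjectClosure {
public:
  size_t count;
  CountingClosure() : count(0) {}
  virtual void do_object(void* /*obj*/) { count++; }
};

int main() {
  CountingClosure cl;
  int a, b, c;
  void* objects[] = { &a, &b, &c };  // simulated "heap"
  for (size_t i = 0; i < 3; i++) {
    cl.do_object(objects[i]);        // models cl->do_object(obj) in the loop above
  }
  printf("visited %zu objects\n", cl.count);
  return 0;
}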
1241 
1242 void ShenandoahHeap::safe_object_iterate(ObjectClosure* cl) {
1243   assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1244   object_iterate(cl);
1245 }
1246 
1247 void ShenandoahHeap::oop_iterate(ExtendedOopClosure* cl) {
1248   ObjectToOopClosure cl2(cl);
1249   object_iterate(&cl2);
1250 }
1251 
1252 class ShenandoahSpaceClosureRegionClosure: public ShenandoahHeapRegionClosure {
1253   SpaceClosure* _cl;
1254 public:
1255   ShenandoahSpaceClosureRegionClosure(SpaceClosure* cl) : _cl(cl) {}
1256   void heap_region_do(ShenandoahHeapRegion* r) {
1257     _cl->do_space(r);


2143 }
2144 
2145 bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
2146   size_t slice = r->region_number() / _bitmap_regions_per_slice;
2147 
2148   size_t regions_from = _bitmap_regions_per_slice * slice;
2149   size_t regions_to   = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1));
2150   for (size_t g = regions_from; g < regions_to; g++) {
2151     assert (g / _bitmap_regions_per_slice == slice, "same slice");
2152     if (skip_self && g == r->region_number()) continue;
2153     if (get_region(g)->is_committed()) {
2154       return true;
2155     }
2156   }
2157   return false;
2158 }
2159 
2160 bool ShenandoahHeap::commit_bitmap_slice(ShenandoahHeapRegion* r) {
2161   assert_heaplock_owned_by_current_thread();
2162 
2163   // Bitmaps in special regions do not need commits
2164   if (_bitmap_region_special) {
2165     return true;
2166   }
2167 
2168   if (is_bitmap_slice_committed(r, true)) {
2169     // Some other region from the group is already committed, meaning the bitmap
2170     // slice is already committed, so we exit right away.
2171     return true;
2172   }
2173 
2174   // Commit the bitmap slice:
2175   size_t slice = r->region_number() / _bitmap_regions_per_slice;
2176   size_t off = _bitmap_bytes_per_slice * slice;
2177   size_t len = _bitmap_bytes_per_slice;
2178   if (!os::commit_memory((char*)_bitmap_region.start() + off, len, false)) {
2179     return false;
2180   }
2181   return true;
2182 }
2183 
2184 bool ShenandoahHeap::uncommit_bitmap_slice(ShenandoahHeapRegion *r) {
2185   assert_heaplock_owned_by_current_thread();
2186 
2187   // Bitmaps in special regions do not need uncommits
2188   if (_bitmap_region_special) {
2189     return true;
2190   }
2191 
2192   if (is_bitmap_slice_committed(r, true)) {
2193     // Some other region from the group is still committed, meaning the bitmap
2194     // slice is should stay committed, exit right away.
2195     return true;
2196   }
2197 
2198   // Uncommit the bitmap slice:
2199   size_t slice = r->region_number() / _bitmap_regions_per_slice;
2200   size_t off = _bitmap_bytes_per_slice * slice;
2201   size_t len = _bitmap_bytes_per_slice;
2202   if (!os::uncommit_memory((char*)_bitmap_region.start() + off, len)) {
2203     return false;
2204   }
2205   return true;
2206 }
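
The slice geometry these two functions rely on is fixed in initialize(): a slice spans at least one bitmap page and a whole number of regions, so every commit/uncommit above is page-granular. A standalone sketch of that sizing rule, assuming illustrative inputs of 2 MiB bitmap pages and 32 KiB of bitmap per region (not this build's actual numbers):

#include <cstdio>
#include <cstddef>

int main() {
  // Assumed example inputs:
  const size_t bitmap_page_size        = 2 * 1024 * 1024;  // e.g. one large page
  const size_t bitmap_bytes_per_region = 32 * 1024;

  size_t regions_per_slice, bytes_per_slice;
  if (bitmap_page_size > bitmap_bytes_per_region) {
    // Many regions share one page; it can only be released when all of them agree.
    regions_per_slice = bitmap_page_size / bitmap_bytes_per_region;
    bytes_per_slice   = bitmap_page_size;
  } else {
    regions_per_slice = 1;
    bytes_per_slice   = bitmap_bytes_per_region;
  }

  // Slice index and byte offset for one region, mirroring the code above:
  size_t region_number = 100;
  size_t slice = region_number / regions_per_slice;
  size_t off   = bytes_per_slice * slice;
  printf("%zu regions/slice, %zu bytes/slice; region %zu -> slice %zu, offset %zu\n",
         regions_per_slice, bytes_per_slice, region_number, slice, off);
  return 0;
}

This is also why is_bitmap_slice_committed() scans the whole group: the underlying page may be uncommitted only when no region in the slice still needs its bitmap.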
2207 
2208 void ShenandoahHeap::vmop_entry_init_mark() {
2209   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2210   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2211   ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_mark_gross);


2594 const char* ShenandoahHeap::degen_event_message(ShenandoahDegenPoint point) const {
2595   switch (point) {
2596     case _degenerated_unset:
2597       return "Pause Degenerated GC (<UNSET>)";
2598     case _degenerated_outside_cycle:
2599       return "Pause Degenerated GC (Outside of Cycle)";
2600     case _degenerated_mark:
2601       return "Pause Degenerated GC (Mark)";
2602     case _degenerated_evac:
2603       return "Pause Degenerated GC (Evacuation)";
2604     case _degenerated_updaterefs:
2605       return "Pause Degenerated GC (Update Refs)";
2606     default:
2607       ShouldNotReachHere();
2608       return "ERROR";
2609   }
2610 }
2611 
2612 jushort* ShenandoahHeap::get_liveness_cache(uint worker_id) {
2613 #ifdef ASSERT
2614   assert(_liveness_cache != NULL, "sanity");
2615   assert(worker_id < _max_workers, "sanity");
2616   for (uint i = 0; i < num_regions(); i++) {
2617     assert(_liveness_cache[worker_id][i] == 0, "liveness cache should be empty");
2618   }
2619 #endif
2620   return _liveness_cache[worker_id];
2621 }
2622 
2623 void ShenandoahHeap::flush_liveness_cache(uint worker_id) {
2624   assert(worker_id < _max_workers, "sanity");
2625   assert(_liveness_cache != NULL, "sanity");
2626   jushort* ld = _liveness_cache[worker_id];
2627   for (uint i = 0; i < num_regions(); i++) {
2628     ShenandoahHeapRegion* r = get_region(i);
2629     jushort live = ld[i];
2630     if (live > 0) {
2631       r->increase_live_data_gc_words(live);
2632       ld[i] = 0;
2633     }
2634   }
2635 }
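
The liveness cache acts as a per-worker scoreboard: each marking worker accumulates per-region live data privately, and flush_liveness_cache() folds those counts into the shared region counters, keeping contended updates off the marking hot path. A standalone model of that pattern, with illustrative sizes and types:

#include <cstdio>
#include <cstddef>

int main() {
  const size_t num_regions = 4;
  const size_t num_workers = 2;

  // Shared per-region totals, plus one zeroed private cache per worker.
  size_t region_live[4] = {0, 0, 0, 0};
  unsigned short cache[2][4] = {{0}};

  // Workers record liveness locally while marking; no synchronization needed.
  cache[0][1] = 10;  cache[0][3] = 5;
  cache[1][1] = 7;

  // Flush: fold each worker's counts into the shared totals and reset the cache,
  // mirroring the loop in flush_liveness_cache().
  for (size_t w = 0; w < num_workers; w++) {
    for (size_t i = 0; i < num_regions; i++) {
      if (cache[w][i] > 0) {
        region_live[i] += cache[w][i];
        cache[w][i] = 0;
      }
    }
  }

  for (size_t i = 0; i < num_regions; i++) {
    printf("region %zu live words: %zu\n", i, region_live[i]);
  }
  return 0;
}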
2636 
2637 BoolObjectClosure* ShenandoahIsAliveSelector::is_alive_closure() {
2638   return ShenandoahHeap::heap()->has_forwarded_objects() ? reinterpret_cast<BoolObjectClosure*>(&_fwd_alive_cl)
2639                                                          : reinterpret_cast<BoolObjectClosure*>(&_alive_cl);
2640 }