< prev index next >

src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp

Print this page
rev 54037 : 8220350: Refactor ShenandoahHeap::initialize
Reviewed-by: XXX
rev 54038 : 8220153: Shenandoah does not work with TransparentHugePages properly
Reviewed-by: XXX
rev 54039 : 8220162: Shenandoah should not commit HugeTLBFS memory
Reviewed-by: XXX


 156 
 157   _num_regions = ShenandoahHeapRegion::region_count();
 158 
 159   size_t num_committed_regions = init_byte_size / reg_size_bytes;
 160   num_committed_regions = MIN2(num_committed_regions, _num_regions);
 161   assert(num_committed_regions <= _num_regions, "sanity");
 162 
 163   _initial_size = num_committed_regions * reg_size_bytes;
 164   _committed = _initial_size;
 165 
 166   size_t heap_page_size   = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
 167   size_t bitmap_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
 168 
 169   //
 170   // Reserve and commit memory for heap
 171   //
 172 
 173   ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size, heap_alignment);
 174   initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*) (heap_rs.base() + heap_rs.size()));
 175   _heap_region = MemRegion((HeapWord*)heap_rs.base(), heap_rs.size() / HeapWordSize);

 176 
 177   assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
 178          "Misaligned heap: " PTR_FORMAT, p2i(base()));
 179 
 180   ReservedSpace sh_rs = heap_rs.first_part(max_byte_size);

 181   os::commit_memory_or_exit(sh_rs.base(), _initial_size, heap_alignment, false,
 182                             "Cannot commit heap memory");

 183 
 184   //
 185   // Reserve and commit memory for bitmap(s)
 186   //
 187 
 188   _bitmap_size = MarkBitMap::compute_size(heap_rs.size());
 189   _bitmap_size = align_up(_bitmap_size, bitmap_page_size);
 190 
 191   size_t bitmap_bytes_per_region = reg_size_bytes / MarkBitMap::heap_map_factor();
 192 
 193   guarantee(bitmap_bytes_per_region != 0,
 194             "Bitmap bytes per region should not be zero");
 195   guarantee(is_power_of_2(bitmap_bytes_per_region),
 196             "Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region);
 197 
 198   if (bitmap_page_size > bitmap_bytes_per_region) {
 199     _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region;
 200     _bitmap_bytes_per_slice = bitmap_page_size;
 201   } else {
 202     _bitmap_regions_per_slice = 1;
 203     _bitmap_bytes_per_slice = bitmap_bytes_per_region;
 204   }
 205 
 206   guarantee(_bitmap_regions_per_slice >= 1,
 207             "Should have at least one region per slice: " SIZE_FORMAT,
 208             _bitmap_regions_per_slice);
 209 
 210   guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0,
 211             "Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT,
 212             _bitmap_bytes_per_slice, bitmap_page_size);
 213 
 214   ReservedSpace bitmap(_bitmap_size, bitmap_page_size);
 215   MemTracker::record_virtual_memory_type(bitmap.base(), mtGC);
 216   _bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize);

 217 
 218   size_t bitmap_init_commit = _bitmap_bytes_per_slice *
 219                               align_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
 220   bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
 221   os::commit_memory_or_exit((char *)_bitmap_region.start(), bitmap_init_commit, bitmap_page_size, false,

 222                             "Cannot commit bitmap memory");

 223 
 224   _marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap_region, _num_regions);
 225 
 226   if (ShenandoahVerify) {
 227     ReservedSpace verify_bitmap(_bitmap_size, bitmap_page_size);

 228     os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), bitmap_page_size, false,
 229                               "Cannot commit verification bitmap memory");

 230     MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
 231     MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
 232     _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
 233     _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
 234   }
 235 
 236   // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
 237   ReservedSpace aux_bitmap(_bitmap_size, bitmap_page_size);
 238   MemTracker::record_virtual_memory_type(aux_bitmap.base(), mtGC);
 239   _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize);

 240   _aux_bit_map.initialize(_heap_region, _aux_bitmap_region);
 241 
 242   //
 243   // Create regions and region sets
 244   //
 245 
 246   _regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC);
 247   _free_set = new ShenandoahFreeSet(this, _num_regions);
 248   _collection_set = new ShenandoahCollectionSet(this, (HeapWord*)sh_rs.base());
 249 
 250   {
 251     ShenandoahHeapLocker locker(lock());
 252     for (size_t i = 0; i < _num_regions; i++) {
 253       ShenandoahHeapRegion* r = new ShenandoahHeapRegion(
 254               this,
 255               (HeapWord*) sh_rs.base() + ShenandoahHeapRegion::region_size_words() * i,
 256               ShenandoahHeapRegion::region_size_words(),
 257               i,
 258               i < num_committed_regions
 259       );


 390   } else {
 391       ShouldNotReachHere();
 392   }
 393 
 394 }
 395 
 396 #ifdef _MSC_VER
 397 #pragma warning( push )
 398 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
 399 #endif
 400 
 401 ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
 402   CollectedHeap(),
 403   _initial_size(0),
 404   _used(0),
 405   _committed(0),
 406   _bytes_allocated_since_gc_start(0),
 407   _max_workers(MAX2(ConcGCThreads, ParallelGCThreads)),
 408   _workers(NULL),
 409   _safepoint_workers(NULL),

 410   _num_regions(0),
 411   _regions(NULL),
 412   _update_refs_iterator(this),
 413   _control_thread(NULL),
 414   _shenandoah_policy(policy),
 415   _heuristics(NULL),
 416   _free_set(NULL),
 417   _scm(new ShenandoahConcurrentMark()),
 418   _traversal_gc(NULL),
 419   _full_gc(new ShenandoahMarkCompact()),
 420   _pacer(NULL),
 421   _verifier(NULL),
 422   _alloc_tracker(NULL),
 423   _phase_timings(NULL),
 424   _monitoring_support(NULL),
 425   _memory_pool(NULL),
 426   _stw_memory_manager("Shenandoah Pauses", "end of GC pause"),
 427   _cycle_memory_manager("Shenandoah Cycles", "end of GC cycle"),
 428   _gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
 429   _soft_ref_policy(),
 430   _ref_processor(NULL),
 431   _marking_context(NULL),
 432   _bitmap_size(0),
 433   _bitmap_regions_per_slice(0),
 434   _bitmap_bytes_per_slice(0),


 435   _liveness_cache(NULL),
 436   _collection_set(NULL)
 437 {
 438   log_info(gc, init)("GC threads: " UINT32_FORMAT " parallel, " UINT32_FORMAT " concurrent", ParallelGCThreads, ConcGCThreads);
 439   log_info(gc, init)("Reference processing: %s", ParallelRefProcEnabled ? "parallel" : "serial");
 440 
 441   BarrierSet::set_barrier_set(new ShenandoahBarrierSet(this));
 442 
 443   _max_workers = MAX2(_max_workers, 1U);
 444   _workers = new ShenandoahWorkGang("Shenandoah GC Threads", _max_workers,
 445                             /* are_GC_task_threads */true,
 446                             /* are_ConcurrentGC_threads */false);
 447   if (_workers == NULL) {
 448     vm_exit_during_initialization("Failed necessary allocation.");
 449   } else {
 450     _workers->initialize_workers();
 451   }
 452 
 453   if (ShenandoahParallelSafepointThreads > 1) {
 454     _safepoint_workers = new ShenandoahWorkGang("Safepoint Cleanup Thread",


1300   // No-op.
1301 }
1302 
1303 /*
1304  * Iterates objects in the heap. This is public API, used for, e.g., heap dumping.
1305  *
1306  * We cannot safely iterate objects by doing a linear scan at random points in time. Linear
1307  * scanning needs to deal with dead objects, which may have dead Klass* pointers (e.g.
1308  * calling oopDesc::size() would crash) or dangling reference fields (crashes) etc. Linear
1309  * scanning therefore depends on having a valid marking bitmap to support it. However, we only
1310  * have a valid marking bitmap after successful marking. In particular, we *don't* have a valid
1311  * marking bitmap during marking, after aborted marking or during/after cleanup (when we just
1312  * wiped the bitmap in preparation for next marking).
1313  *
1314  * For all those reasons, we implement object iteration as a single marking traversal, reporting
1315  * objects as we mark+traverse through the heap, starting from GC roots. JVMTI IterateThroughHeap
1316  * is allowed to report dead objects, but is not required to do so.
1317  */
1318 void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
1319   assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
       // The aux bitmap is only reserved at init time; commit it on demand for the
       // duration of this iteration. Iteration is best-effort: bail out on failure.
1320   if (!os::commit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), false)) {
1321     log_warning(gc)("Could not commit native memory for auxiliary marking bitmap for heap iteration");
1322     return;
1323   }
1324 
1325   // Reset bitmap
1326   _aux_bit_map.clear();
1327 
1328   Stack<oop,mtGC> oop_stack;
1329 
1330   // First, we process all GC roots. This populates the work stack with initial objects.
1331   ShenandoahRootProcessor rp(this, 1, ShenandoahPhaseTimings::_num_phases);
1332   ObjectIterateScanRootClosure oops(&_aux_bit_map, &oop_stack);
1333   CLDToOopClosure clds(&oops, ClassLoaderData::_claim_none);
1334   CodeBlobToOopClosure blobs(&oops, false);
1335   rp.process_all_roots(&oops, &oops, &clds, &blobs, NULL, 0);
1336 
1337   // Work through the oop stack to traverse heap.
       // Each popped object is reported to the caller's closure, then its reference
       // fields are scanned, pushing newly-discovered objects back onto the stack.
1338   while (! oop_stack.is_empty()) {
1339     oop obj = oop_stack.pop();
1340     assert(oopDesc::is_oop(obj), "must be a valid oop");
1341     cl->do_object(obj);
1342     obj->oop_iterate(&oops);
1343   }
1344 
1345   assert(oop_stack.is_empty(), "should be empty");
1346 
       // Release the aux bitmap memory again; it is only needed during iteration.
1347   if (!os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size())) {
1348     log_warning(gc)("Could not uncommit native memory for auxiliary marking bitmap for heap iteration");
1349   }
1350 }
1351 
       // Safe variant of object iteration. Delegates to object_iterate(), which is
       // already safe w.r.t. dead objects because it is a marking traversal from roots.
1352 void ShenandoahHeap::safe_object_iterate(ObjectClosure* cl) {
1353   assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1354   object_iterate(cl);
1355 }
1356 
       // Applies the closure to every heap region, in region-index order, on the
       // calling thread. See ShenandoahParallelHeapRegionTask for the parallel variant.
1357 void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
1358   for (size_t i = 0; i < num_regions(); i++) {
1359     ShenandoahHeapRegion* current = get_region(i);
1360     blk->heap_region_do(current);
1361   }
1362 }
1363 
1364 class ShenandoahParallelHeapRegionTask : public AbstractGangTask {
1365 private:
1366   ShenandoahHeap* const _heap;
1367   ShenandoahHeapRegionClosure* const _blk;


2254 }
2255 
       // Returns true if the bitmap slice covering region r is considered committed,
       // i.e. if any region sharing r's slice is committed. With skip_self set, r
       // itself is ignored, which lets callers ask whether any *other* region in the
       // group still keeps the shared slice alive.
2256 bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
2257   size_t slice = r->region_number() / _bitmap_regions_per_slice;
2258 
       // [regions_from, regions_to) is the group of regions mapped to this slice;
       // the upper bound is clamped for the last, possibly partial, group.
2259   size_t regions_from = _bitmap_regions_per_slice * slice;
2260   size_t regions_to   = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1));
2261   for (size_t g = regions_from; g < regions_to; g++) {
2262     assert (g / _bitmap_regions_per_slice == slice, "same slice");
2263     if (skip_self && g == r->region_number()) continue;
2264     if (get_region(g)->is_committed()) {
2265       return true;
2266     }
2267   }
2268   return false;
2269 }
2270 
       // Commits the bitmap slice covering region r, unless another region in the
       // same slice group already keeps it committed. Requires the heap lock.
       // Returns false only if the OS-level commit fails.
2271 bool ShenandoahHeap::commit_bitmap_slice(ShenandoahHeapRegion* r) {
2272   assert_heaplock_owned_by_current_thread();
2273 




2274   if (is_bitmap_slice_committed(r, true)) {
2275     // Some other region from the group is already committed, meaning the bitmap
2276     // slice is already committed, we exit right away.
2277     return true;
2278   }
2279 
2280   // Commit the bitmap slice:
2281   size_t slice = r->region_number() / _bitmap_regions_per_slice;
2282   size_t off = _bitmap_bytes_per_slice * slice;
2283   size_t len = _bitmap_bytes_per_slice;
2284   if (!os::commit_memory((char*)_bitmap_region.start() + off, len, false)) {
2285     return false;
2286   }
2287   return true;
2288 }
2289 
       // Uncommits the bitmap slice covering region r, unless another region in the
       // same slice group still needs it. Requires the heap lock. Returns false only
       // if the OS-level uncommit fails.
2290 bool ShenandoahHeap::uncommit_bitmap_slice(ShenandoahHeapRegion *r) {
2291   assert_heaplock_owned_by_current_thread();




2292 
2293   if (is_bitmap_slice_committed(r, true)) {
2294     // Some other region from the group is still committed, meaning the bitmap
2295     // slice should stay committed, exit right away.
2296     return true;
2297   }
2298 
2299   // Uncommit the bitmap slice:
2300   size_t slice = r->region_number() / _bitmap_regions_per_slice;
2301   size_t off = _bitmap_bytes_per_slice * slice;
2302   size_t len = _bitmap_bytes_per_slice;
2303   if (!os::uncommit_memory((char*)_bitmap_region.start() + off, len)) {
2304     return false;
2305   }
2306   return true;
2307 }
2308 
2309 void ShenandoahHeap::safepoint_synchronize_begin() {
2310   if (ShenandoahSuspendibleWorkers || UseStringDeduplication) {
2311     SuspendibleThreadSet::synchronize();




 156 
 157   _num_regions = ShenandoahHeapRegion::region_count();
 158 
 159   size_t num_committed_regions = init_byte_size / reg_size_bytes;
 160   num_committed_regions = MIN2(num_committed_regions, _num_regions);
 161   assert(num_committed_regions <= _num_regions, "sanity");
 162 
 163   _initial_size = num_committed_regions * reg_size_bytes;
 164   _committed = _initial_size;
 165 
 166   size_t heap_page_size   = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
 167   size_t bitmap_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
 168 
 169   //
 170   // Reserve and commit memory for heap
 171   //
 172 
 173   ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size, heap_alignment);
 174   initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*) (heap_rs.base() + heap_rs.size()));
 175   _heap_region = MemRegion((HeapWord*)heap_rs.base(), heap_rs.size() / HeapWordSize);
 176   _heap_region_special = heap_rs.special();
 177 
 178   assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
 179          "Misaligned heap: " PTR_FORMAT, p2i(base()));
 180 
 181   ReservedSpace sh_rs = heap_rs.first_part(max_byte_size);
 182   if (!_heap_region_special) {
 183     os::commit_memory_or_exit(sh_rs.base(), _initial_size, heap_alignment, false,
 184                               "Cannot commit heap memory");
 185   }
 186 
 187   //
 188   // Reserve and commit memory for bitmap(s)
 189   //
 190 
 191   _bitmap_size = MarkBitMap::compute_size(heap_rs.size());
 192   _bitmap_size = align_up(_bitmap_size, bitmap_page_size);
 193 
 194   size_t bitmap_bytes_per_region = reg_size_bytes / MarkBitMap::heap_map_factor();
 195 
 196   guarantee(bitmap_bytes_per_region != 0,
 197             "Bitmap bytes per region should not be zero");
 198   guarantee(is_power_of_2(bitmap_bytes_per_region),
 199             "Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region);
 200 
 201   if (bitmap_page_size > bitmap_bytes_per_region) {
 202     _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region;
 203     _bitmap_bytes_per_slice = bitmap_page_size;
 204   } else {
 205     _bitmap_regions_per_slice = 1;
 206     _bitmap_bytes_per_slice = bitmap_bytes_per_region;
 207   }
 208 
 209   guarantee(_bitmap_regions_per_slice >= 1,
 210             "Should have at least one region per slice: " SIZE_FORMAT,
 211             _bitmap_regions_per_slice);
 212 
 213   guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0,
 214             "Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT,
 215             _bitmap_bytes_per_slice, bitmap_page_size);
 216 
 217   ReservedSpace bitmap(_bitmap_size, bitmap_page_size);
 218   MemTracker::record_virtual_memory_type(bitmap.base(), mtGC);
 219   _bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize);
 220   _bitmap_region_special = bitmap.special();
 221 
 222   size_t bitmap_init_commit = _bitmap_bytes_per_slice *
 223                               align_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
 224   bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
 225   if (!_bitmap_region_special) {
 226     os::commit_memory_or_exit((char *) _bitmap_region.start(), bitmap_init_commit, bitmap_page_size, false,
 227                               "Cannot commit bitmap memory");
 228   }
 229 
 230   _marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap_region, _num_regions);
 231 
 232   if (ShenandoahVerify) {
 233     ReservedSpace verify_bitmap(_bitmap_size, bitmap_page_size);
 234     if (!verify_bitmap.special()) {
 235       os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), bitmap_page_size, false,
 236                                 "Cannot commit verification bitmap memory");
 237     }
 238     MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
 239     MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
 240     _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
 241     _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
 242   }
 243 
 244   // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
 245   ReservedSpace aux_bitmap(_bitmap_size, bitmap_page_size);
 246   MemTracker::record_virtual_memory_type(aux_bitmap.base(), mtGC);
 247   _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize);
 248   _aux_bitmap_region_special = aux_bitmap.special();
 249   _aux_bit_map.initialize(_heap_region, _aux_bitmap_region);
 250 
 251   //
 252   // Create regions and region sets
 253   //
 254 
 255   _regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC);
 256   _free_set = new ShenandoahFreeSet(this, _num_regions);
 257   _collection_set = new ShenandoahCollectionSet(this, (HeapWord*)sh_rs.base());
 258 
 259   {
 260     ShenandoahHeapLocker locker(lock());
 261     for (size_t i = 0; i < _num_regions; i++) {
 262       ShenandoahHeapRegion* r = new ShenandoahHeapRegion(
 263               this,
 264               (HeapWord*) sh_rs.base() + ShenandoahHeapRegion::region_size_words() * i,
 265               ShenandoahHeapRegion::region_size_words(),
 266               i,
 267               i < num_committed_regions
 268       );


 399   } else {
 400       ShouldNotReachHere();
 401   }
 402 
 403 }
 404 
 405 #ifdef _MSC_VER
 406 #pragma warning( push )
 407 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
 408 #endif
 409 
 410 ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
 411   CollectedHeap(),
 412   _initial_size(0),
 413   _used(0),
 414   _committed(0),
 415   _bytes_allocated_since_gc_start(0),
 416   _max_workers(MAX2(ConcGCThreads, ParallelGCThreads)),
 417   _workers(NULL),
 418   _safepoint_workers(NULL),
 419   _heap_region_special(false),
 420   _num_regions(0),
 421   _regions(NULL),
 422   _update_refs_iterator(this),
 423   _control_thread(NULL),
 424   _shenandoah_policy(policy),
 425   _heuristics(NULL),
 426   _free_set(NULL),
 427   _scm(new ShenandoahConcurrentMark()),
 428   _traversal_gc(NULL),
 429   _full_gc(new ShenandoahMarkCompact()),
 430   _pacer(NULL),
 431   _verifier(NULL),
 432   _alloc_tracker(NULL),
 433   _phase_timings(NULL),
 434   _monitoring_support(NULL),
 435   _memory_pool(NULL),
 436   _stw_memory_manager("Shenandoah Pauses", "end of GC pause"),
 437   _cycle_memory_manager("Shenandoah Cycles", "end of GC cycle"),
 438   _gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
 439   _soft_ref_policy(),
 440   _ref_processor(NULL),
 441   _marking_context(NULL),
 442   _bitmap_size(0),
 443   _bitmap_regions_per_slice(0),
 444   _bitmap_bytes_per_slice(0),
 445   _bitmap_region_special(false),
 446   _aux_bitmap_region_special(false),
 447   _liveness_cache(NULL),
 448   _collection_set(NULL)
 449 {
 450   log_info(gc, init)("GC threads: " UINT32_FORMAT " parallel, " UINT32_FORMAT " concurrent", ParallelGCThreads, ConcGCThreads);
 451   log_info(gc, init)("Reference processing: %s", ParallelRefProcEnabled ? "parallel" : "serial");
 452 
 453   BarrierSet::set_barrier_set(new ShenandoahBarrierSet(this));
 454 
 455   _max_workers = MAX2(_max_workers, 1U);
 456   _workers = new ShenandoahWorkGang("Shenandoah GC Threads", _max_workers,
 457                             /* are_GC_task_threads */true,
 458                             /* are_ConcurrentGC_threads */false);
 459   if (_workers == NULL) {
 460     vm_exit_during_initialization("Failed necessary allocation.");
 461   } else {
 462     _workers->initialize_workers();
 463   }
 464 
 465   if (ShenandoahParallelSafepointThreads > 1) {
 466     _safepoint_workers = new ShenandoahWorkGang("Safepoint Cleanup Thread",


1312   // No-op.
1313 }
1314 
1315 /*
1316  * Iterates objects in the heap. This is public API, used for, e.g., heap dumping.
1317  *
1318  * We cannot safely iterate objects by doing a linear scan at random points in time. Linear
1319  * scanning needs to deal with dead objects, which may have dead Klass* pointers (e.g.
1320  * calling oopDesc::size() would crash) or dangling reference fields (crashes) etc. Linear
1321  * scanning therefore depends on having a valid marking bitmap to support it. However, we only
1322  * have a valid marking bitmap after successful marking. In particular, we *don't* have a valid
1323  * marking bitmap during marking, after aborted marking or during/after cleanup (when we just
1324  * wiped the bitmap in preparation for next marking).
1325  *
1326  * For all those reasons, we implement object iteration as a single marking traversal, reporting
1327  * objects as we mark+traverse through the heap, starting from GC roots. JVMTI IterateThroughHeap
1328  * is allowed to report dead objects, but is not required to do so.
1329  */
1330 void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
1331   assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
       // The aux bitmap is only reserved at init time; commit it on demand for this
       // iteration. "Special" reservations are skipped: they cannot be committed
       // explicitly. Iteration is best-effort: bail out on commit failure.
1332   if (!_aux_bitmap_region_special && !os::commit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), false)) {
1333     log_warning(gc)("Could not commit native memory for auxiliary marking bitmap for heap iteration");
1334     return;
1335   }
1336 
1337   // Reset bitmap
1338   _aux_bit_map.clear();
1339 
1340   Stack<oop,mtGC> oop_stack;
1341 
1342   // First, we process all GC roots. This populates the work stack with initial objects.
1343   ShenandoahRootProcessor rp(this, 1, ShenandoahPhaseTimings::_num_phases);
1344   ObjectIterateScanRootClosure oops(&_aux_bit_map, &oop_stack);
1345   CLDToOopClosure clds(&oops, ClassLoaderData::_claim_none);
1346   CodeBlobToOopClosure blobs(&oops, false);
1347   rp.process_all_roots(&oops, &oops, &clds, &blobs, NULL, 0);
1348 
1349   // Work through the oop stack to traverse heap.
       // Each popped object is reported to the caller's closure, then its reference
       // fields are scanned, pushing newly-discovered objects back onto the stack.
1350   while (! oop_stack.is_empty()) {
1351     oop obj = oop_stack.pop();
1352     assert(oopDesc::is_oop(obj), "must be a valid oop");
1353     cl->do_object(obj);
1354     obj->oop_iterate(&oops);
1355   }
1356 
1357   assert(oop_stack.is_empty(), "should be empty");
1358 
       // Release the aux bitmap memory again, unless the reservation is special
       // (and therefore is not ours to uncommit).
1359   if (!_aux_bitmap_region_special && !os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size())) {
1360     log_warning(gc)("Could not uncommit native memory for auxiliary marking bitmap for heap iteration");
1361   }
1362 }
1363 
       // Safe variant of object iteration. Delegates to object_iterate(), which is
       // already safe w.r.t. dead objects because it is a marking traversal from roots.
1364 void ShenandoahHeap::safe_object_iterate(ObjectClosure* cl) {
1365   assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1366   object_iterate(cl);
1367 }
1368 
       // Applies the closure to every heap region, in region-index order, on the
       // calling thread. See ShenandoahParallelHeapRegionTask for the parallel variant.
1369 void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
1370   for (size_t i = 0; i < num_regions(); i++) {
1371     ShenandoahHeapRegion* current = get_region(i);
1372     blk->heap_region_do(current);
1373   }
1374 }
1375 
1376 class ShenandoahParallelHeapRegionTask : public AbstractGangTask {
1377 private:
1378   ShenandoahHeap* const _heap;
1379   ShenandoahHeapRegionClosure* const _blk;


2266 }
2267 
       // Returns true if the bitmap slice covering region r is considered committed,
       // i.e. if any region sharing r's slice is committed. With skip_self set, r
       // itself is ignored, which lets callers ask whether any *other* region in the
       // group still keeps the shared slice alive.
2268 bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
2269   size_t slice = r->region_number() / _bitmap_regions_per_slice;
2270 
       // [regions_from, regions_to) is the group of regions mapped to this slice;
       // the upper bound is clamped for the last, possibly partial, group.
2271   size_t regions_from = _bitmap_regions_per_slice * slice;
2272   size_t regions_to   = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1));
2273   for (size_t g = regions_from; g < regions_to; g++) {
2274     assert (g / _bitmap_regions_per_slice == slice, "same slice");
2275     if (skip_self && g == r->region_number()) continue;
2276     if (get_region(g)->is_committed()) {
2277       return true;
2278     }
2279   }
2280   return false;
2281 }
2282 
       // Commits the bitmap slice covering region r, unless the bitmap reservation is
       // special or another region in the same slice group already keeps it committed.
       // Requires the heap lock. Returns false only if the OS-level commit fails.
2283 bool ShenandoahHeap::commit_bitmap_slice(ShenandoahHeapRegion* r) {
2284   assert_heaplock_owned_by_current_thread();
2285 
2286   // Bitmaps in special regions do not need commits
2287   if (_bitmap_region_special) {
2288     return true;
2289   }
2290 
2291   if (is_bitmap_slice_committed(r, true)) {
2292     // Some other region from the group is already committed, meaning the bitmap
2293     // slice is already committed, we exit right away.
2294     return true;
2295   }
2296 
2297   // Commit the bitmap slice:
2298   size_t slice = r->region_number() / _bitmap_regions_per_slice;
2299   size_t off = _bitmap_bytes_per_slice * slice;
2300   size_t len = _bitmap_bytes_per_slice;
2301   if (!os::commit_memory((char*)_bitmap_region.start() + off, len, false)) {
2302     return false;
2303   }
2304   return true;
2305 }
2306 
       // Uncommits the bitmap slice covering region r, unless the bitmap reservation
       // is special or another region in the same slice group still needs it.
       // Requires the heap lock. Returns false only if the OS-level uncommit fails.
2307 bool ShenandoahHeap::uncommit_bitmap_slice(ShenandoahHeapRegion *r) {
2308   assert_heaplock_owned_by_current_thread();
2309 
2310   // Bitmaps in special regions do not need uncommits
2311   if (_bitmap_region_special) {
2312     return true;
2313   }
2314 
2315   if (is_bitmap_slice_committed(r, true)) {
2316     // Some other region from the group is still committed, meaning the bitmap
2317     // slice should stay committed, exit right away.
2318     return true;
2319   }
2320 
2321   // Uncommit the bitmap slice:
2322   size_t slice = r->region_number() / _bitmap_regions_per_slice;
2323   size_t off = _bitmap_bytes_per_slice * slice;
2324   size_t len = _bitmap_bytes_per_slice;
2325   if (!os::uncommit_memory((char*)_bitmap_region.start() + off, len)) {
2326     return false;
2327   }
2328   return true;
2329 }
2330 
2331 void ShenandoahHeap::safepoint_synchronize_begin() {
2332   if (ShenandoahSuspendibleWorkers || UseStringDeduplication) {
2333     SuspendibleThreadSet::synchronize();


< prev index next >