src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp

rev 52753 : [backport] 8221435: Shenandoah should not mark through weak roots
Reviewed-by: rkennke, shade


1100   }
1101 };
1102 
1103 void ShenandoahHeap::evacuate_and_update_roots() {
1104 #if defined(COMPILER2) || INCLUDE_JVMCI
1105   DerivedPointerTable::clear();
1106 #endif
1107   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only iterate roots while world is stopped");
1108 
1109   {
1110     ShenandoahRootEvacuator rp(this, workers()->active_workers(), ShenandoahPhaseTimings::init_evac);
1111     ShenandoahEvacuateUpdateRootsTask roots_task(&rp);
1112     workers()->run_task(&roots_task);
1113   }
1114 
1115 #if defined(COMPILER2) || INCLUDE_JVMCI
1116   DerivedPointerTable::update_pointers();
1117 #endif
1118 }
1119 
1120 void ShenandoahHeap::roots_iterate(OopClosure* cl) {
1121   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only iterate roots while world is stopped");
1122 
1123   CodeBlobToOopClosure blobsCl(cl, false);
1124   CLDToOopClosure cldCl(cl);
1125 
1126   ShenandoahRootProcessor rp(this, 1, ShenandoahPhaseTimings::_num_phases);
1127   rp.process_all_roots(cl, NULL, &cldCl, &blobsCl, NULL, 0);
1128 }
1129 
1130 // Returns size in bytes
1131 size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
1132   if (ShenandoahElasticTLAB) {
1133     // With Elastic TLABs, return the max allowed size, and let the allocation path
1134     // figure out the safe size for current allocation.
1135     return ShenandoahHeapRegion::max_tlab_size_bytes();
1136   } else {
1137     return MIN2(_free_set->unsafe_peek_free(), ShenandoahHeapRegion::max_tlab_size_bytes());
1138   }
1139 }
1140 
1141 size_t ShenandoahHeap::max_tlab_size() const {
1142   // Returns size in words
1143   return ShenandoahHeapRegion::max_tlab_size_words();
1144 }
1145 
1146 class ShenandoahRetireAndResetGCLABClosure : public ThreadClosure {
1147 public:
1148   void do_thread(Thread* thread) {
1149     PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);


1318  * objects as we mark+traverse through the heap, starting from GC roots. JVMTI IterateThroughHeap
1319  * is allowed to report dead objects, but is not required to do so.
1320  */
1321 void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
1322   assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1323   if (!_aux_bitmap_region_special && !os::commit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), false)) {
1324     log_warning(gc)("Could not commit native memory for auxiliary marking bitmap for heap iteration");
1325     return;
1326   }
1327 
1328   // Reset bitmap
1329   _aux_bit_map.clear();
1330 
1331   Stack<oop,mtGC> oop_stack;
1332 
1333   // First, we process all GC roots. This populates the work stack with initial objects.
1334   ShenandoahRootProcessor rp(this, 1, ShenandoahPhaseTimings::_num_phases);
1335   ObjectIterateScanRootClosure oops(&_aux_bit_map, &oop_stack);
1336   CLDToOopClosure clds(&oops, false);
1337   CodeBlobToOopClosure blobs(&oops, false);
1338   rp.process_all_roots(&oops, &oops, &clds, &blobs, NULL, 0);
1339 
1340   // Work through the oop stack to traverse heap.
1341   while (! oop_stack.is_empty()) {
1342     oop obj = oop_stack.pop();
1343     assert(oopDesc::is_oop(obj), "must be a valid oop");
1344     cl->do_object(obj);
1345     obj->oop_iterate(&oops);
1346   }
1347 
1348   assert(oop_stack.is_empty(), "should be empty");
1349 
1350   if (!_aux_bitmap_region_special && !os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size())) {
1351     log_warning(gc)("Could not uncommit native memory for auxiliary marking bitmap for heap iteration");
1352   }
1353 }
1354 
1355 void ShenandoahHeap::safe_object_iterate(ObjectClosure* cl) {
1356   assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1357   object_iterate(cl);
1358 }




1100   }
1101 };
1102 
1103 void ShenandoahHeap::evacuate_and_update_roots() {
1104 #if defined(COMPILER2) || INCLUDE_JVMCI
1105   DerivedPointerTable::clear();
1106 #endif
1107   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only iterate roots while world is stopped");
1108 
1109   {
1110     ShenandoahRootEvacuator rp(this, workers()->active_workers(), ShenandoahPhaseTimings::init_evac);
1111     ShenandoahEvacuateUpdateRootsTask roots_task(&rp);
1112     workers()->run_task(&roots_task);
1113   }
1114 
1115 #if defined(COMPILER2) || INCLUDE_JVMCI
1116   DerivedPointerTable::update_pointers();
1117 #endif
1118 }
1119 










1120 // Returns size in bytes
1121 size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
1122   if (ShenandoahElasticTLAB) {
1123     // With Elastic TLABs, return the max allowed size, and let the allocation path
1124     // figure out the safe size for current allocation.
1125     return ShenandoahHeapRegion::max_tlab_size_bytes();
1126   } else {
1127     return MIN2(_free_set->unsafe_peek_free(), ShenandoahHeapRegion::max_tlab_size_bytes());
1128   }
1129 }
1130 
1131 size_t ShenandoahHeap::max_tlab_size() const {
1132   // Returns size in words
1133   return ShenandoahHeapRegion::max_tlab_size_words();
1134 }
1135 
1136 class ShenandoahRetireAndResetGCLABClosure : public ThreadClosure {
1137 public:
1138   void do_thread(Thread* thread) {
1139     PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);


1308  * objects as we mark+traverse through the heap, starting from GC roots. JVMTI IterateThroughHeap
1309  * is allowed to report dead objects, but is not required to do so.
1310  */
1311 void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
1312   assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1313   if (!_aux_bitmap_region_special && !os::commit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), false)) {
1314     log_warning(gc)("Could not commit native memory for auxiliary marking bitmap for heap iteration");
1315     return;
1316   }
1317 
1318   // Reset bitmap
1319   _aux_bit_map.clear();
1320 
1321   Stack<oop,mtGC> oop_stack;
1322 
1323   // First, we process all GC roots. This populates the work stack with initial objects.
1324   ShenandoahRootProcessor rp(this, 1, ShenandoahPhaseTimings::_num_phases);
1325   ObjectIterateScanRootClosure oops(&_aux_bit_map, &oop_stack);
1326   CLDToOopClosure clds(&oops, false);
1327   CodeBlobToOopClosure blobs(&oops, false);
1328   rp.process_all_roots(&oops, &clds, &blobs, NULL, 0);
1329 
1330   // Work through the oop stack to traverse heap.
1331   while (! oop_stack.is_empty()) {
1332     oop obj = oop_stack.pop();
1333     assert(oopDesc::is_oop(obj), "must be a valid oop");
1334     cl->do_object(obj);
1335     obj->oop_iterate(&oops);
1336   }
1337 
1338   assert(oop_stack.is_empty(), "should be empty");
1339 
1340   if (!_aux_bitmap_region_special && !os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size())) {
1341     log_warning(gc)("Could not uncommit native memory for auxiliary marking bitmap for heap iteration");
1342   }
1343 }
1344 
1345 void ShenandoahHeap::safe_object_iterate(ObjectClosure* cl) {
1346   assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1347   object_iterate(cl);
1348 }