
src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp

rev 59828 : 8247845: Shenandoah: refactor TLAB/GCLAB retirement code
Reviewed-by: XXX


1027 
1028   oop humongous_obj = oop(start->bottom());
1029   size_t size = humongous_obj->size();
1030   size_t required_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
1031   size_t index = start->index() + required_regions - 1;
1032 
1033   assert(!start->has_live(), "liveness must be zero");
1034 
1035   for (size_t i = 0; i < required_regions; i++) {
1036     // Reclaim from the tail. Otherwise, an assertion fails when printing the region to the trace log,
1037     // as it expects that every region belongs to a humongous range that starts with a humongous start region.
1038     ShenandoahHeapRegion* region = get_region(index--);
1039 
1040     assert(region->is_humongous(), "expect correct humongous start or continuation");
1041     assert(!region->is_cset(), "Humongous region should not be in collection set");
1042 
1043     region->make_trash_immediate();
1044   }
1045 }
1046 
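For reference, the region math above is a ceiling division of the humongous object's byte size by the region size, and the loop then walks the resulting range backwards from its last region. A minimal, self-contained sketch of that arithmetic; required_regions_model and the 2 MB region size below are illustrative stand-ins, not the exact ShenandoahHeapRegion API:

    #include <cstddef>

    // Model of the sizing logic: how many fixed-size regions a humongous object
    // of 'object_bytes' occupies, rounding up to whole regions.
    static size_t required_regions_model(size_t object_bytes, size_t region_size_bytes) {
      return (object_bytes + region_size_bytes - 1) / region_size_bytes;
    }

    // Example: a 5 MB object with (say) 2 MB regions occupies 3 regions, so the code
    // above trashes indices start->index() .. start->index() + 3 - 1, working from the
    // tail so a continuation region is never left without its humongous start region.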
1047 class ShenandoahRetireGCLABClosure : public ThreadClosure {
1048 public:
1049   void do_thread(Thread* thread) {
1050     PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1051     assert(gclab != NULL, "GCLAB should be initialized for %s", thread->name());
1052     gclab->retire();
1053   }
1054 };
1055 
1056 void ShenandoahHeap::make_parsable(bool retire_tlabs) {
1057   if (UseTLAB) {
1058     CollectedHeap::ensure_parsability(retire_tlabs);
1059   }
1060   ShenandoahRetireGCLABClosure cl;
1061   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1062     cl.do_thread(t);
1063   }
1064   workers()->threads_do(&cl);
1065 }
1066 
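The loop in make_parsable() above shows the thread fan-out this file uses repeatedly: visit every mutator (Java) thread, then every GC worker thread, with the same ThreadClosure. A hedged sketch of that pattern as a standalone helper; do_gc_relevant_threads() is hypothetical and does not exist in this patch:

    // Hypothetical helper, only naming the recurring pattern: apply a ThreadClosure
    // to all mutator threads first, then to the parallel GC workers.
    static void do_gc_relevant_threads(ShenandoahHeap* heap, ThreadClosure* cl) {
      for (JavaThreadIteratorWithHandle jtiwh; JavaThread* t = jtiwh.next(); ) {
        cl->do_thread(t);               // mutator threads carry TLABs and GCLABs
      }
      heap->workers()->threads_do(cl);  // GC worker threads carry GCLABs
    }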
1067 void ShenandoahHeap::resize_tlabs() {
1068   CollectedHeap::resize_all_tlabs();
1069 }
1070 
1071 class ShenandoahEvacuateUpdateRootsTask : public AbstractGangTask {
1072 private:
1073   ShenandoahRootEvacuator* _rp;
1074 
1075 public:
1076   ShenandoahEvacuateUpdateRootsTask(ShenandoahRootEvacuator* rp) :
1077     AbstractGangTask("Shenandoah evacuate and update roots"),
1078     _rp(rp) {}
1079 
1080   void work(uint worker_id) {
1081     ShenandoahParallelWorkerSession worker_session(worker_id);
1082     ShenandoahEvacOOMScope oom_evac_scope;
1083     ShenandoahEvacuateUpdateRootsClosure<> cl;
1084     MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);
1085     _rp->roots_do(worker_id, &cl);
1086   }
1087 };
1088 


1105   DerivedPointerTable::update_pointers();
1106 #endif
1107 }
1108 
1109 // Returns size in bytes
1110 size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
1111   if (ShenandoahElasticTLAB) {
1112     // With Elastic TLABs, return the max allowed size, and let the allocation path
1113     // figure out the safe size for current allocation.
1114     return ShenandoahHeapRegion::max_tlab_size_bytes();
1115   } else {
1116     return MIN2(_free_set->unsafe_peek_free(), ShenandoahHeapRegion::max_tlab_size_bytes());
1117   }
1118 }
1119 
1120 size_t ShenandoahHeap::max_tlab_size() const {
1121   // Returns size in words
1122   return ShenandoahHeapRegion::max_tlab_size_words();
1123 }
1124 
1125 class ShenandoahRetireAndResetGCLABClosure : public ThreadClosure {
1126 public:
1127   void do_thread(Thread* thread) {
1128     PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1129     gclab->retire();
1130     if (ShenandoahThreadLocalData::gclab_size(thread) > 0) {
1131       ShenandoahThreadLocalData::set_gclab_size(thread, 0);
1132     }
1133   }
1134 };
1135 
1136 void ShenandoahHeap::retire_and_reset_gclabs() {
1137   ShenandoahRetireAndResetGCLABClosure cl;
1138   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1139     cl.do_thread(t);
1140   }
1141   workers()->threads_do(&cl);
1142 }
1143 
1144 void ShenandoahHeap::collect(GCCause::Cause cause) {
1145   control_thread()->request_gc(cause);
1146 }
1147 
1148 void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {
1149   //assert(false, "Shouldn't need to do full collections");
1150 }
1151 
1152 HeapWord* ShenandoahHeap::block_start(const void* addr) const {
1153   ShenandoahHeapRegion* r = heap_region_containing(addr);
1154   if (r != NULL) {
1155     return r->block_start(addr);
1156   }
1157   return NULL;
1158 }
1159 
1160 bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
1161   ShenandoahHeapRegion* r = heap_region_containing(addr);
1162   return r->block_is_obj(addr);
1163 }
1164 
1165 bool ShenandoahHeap::print_location(outputStream* st, void* addr) const {
1166   return BlockLocationPrinter<ShenandoahHeap>::print_location(st, addr);
1167 }
1168 
1169 jlong ShenandoahHeap::millis_since_last_gc() {
1170   double v = heuristics()->time_since_last_gc() * 1000;
1171   assert(0 <= v && v <= max_jlong, "value should fit: %f", v);
1172   return (jlong)v;
1173 }
1174 
1175 void ShenandoahHeap::prepare_for_verify() {
1176   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
1177     make_parsable(false);
1178   }
1179 }
1180 
1181 void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
1182   workers()->threads_do(tcl);
1183   if (_safepoint_workers != NULL) {
1184     _safepoint_workers->threads_do(tcl);
1185   }
1186   if (ShenandoahStringDedup::is_enabled()) {
1187     ShenandoahStringDedup::threads_do(tcl);
1188   }
1189 }
1190 
1191 void ShenandoahHeap::print_tracing_info() const {
1192   LogTarget(Info, gc, stats) lt;
1193   if (lt.is_enabled()) {
1194     ResourceMark rm;
1195     LogStream ls(lt);
1196 
1197     phase_timings()->print_global_on(&ls);


1247 
1248       assert(oopDesc::is_oop(obj), "must be a valid oop");
1249       if (!_bitmap->is_marked(obj)) {
1250         _bitmap->mark(obj);
1251         _oop_stack->push(obj);
1252       }
1253     }
1254   }
1255 public:
1256   ObjectIterateScanRootClosure(MarkBitMap* bitmap, Stack<oop,mtGC>* oop_stack) :
1257     _bitmap(bitmap), _oop_stack(oop_stack), _heap(ShenandoahHeap::heap()),
1258     _marking_context(_heap->marking_context()) {}
1259   void do_oop(oop* p)       { do_oop_work(p); }
1260   void do_oop(narrowOop* p) { do_oop_work(p); }
1261 };
1262 
1263 /*
1264  * This is public API, used in preparation of object_iterate().
1265  * Since we don't do linear scan of heap in object_iterate() (see comment below), we don't
1266  * need to make the heap parsable. For Shenandoah-internal linear heap scans that we can
1267  * control, we call SH::make_tlabs_parsable().
1268  */
1269 void ShenandoahHeap::ensure_parsability(bool retire_tlabs) {
1270   // No-op.
1271 }
1272 
1273 /*
1274  * Iterates objects in the heap. This is public API, used for, e.g., heap dumping.
1275  *
1276  * We cannot safely iterate objects by doing a linear scan at random points in time. Linear
1277  * scanning needs to deal with dead objects, which may have dead Klass* pointers (e.g.
1278  * calling oopDesc::size() would crash) or dangling reference fields (crashes) etc. Linear
1279  * scanning therefore depends on having a valid marking bitmap to support it. However, we only
1280  * have a valid marking bitmap after successful marking. In particular, we *don't* have a valid
1281  * marking bitmap during marking, after aborted marking or during/after cleanup (when we just
1282  * wiped the bitmap in preparation for next marking).
1283  *
1284  * For all those reasons, we implement object iteration as a single marking traversal, reporting
1285  * objects as we mark+traverse through the heap, starting from GC roots. JVMTI IterateThroughHeap
1286  * is allowed to report dead objects, but is not required to do so.
1287  */


1403   bool is_thread_safe() { return true; }
1404 };
1405 
1406 void ShenandoahHeap::op_init_mark() {
1407   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
1408   assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
1409 
1410   assert(marking_context()->is_bitmap_clear(), "need clear marking bitmap");
1411   assert(!marking_context()->is_complete(), "should not be complete");
1412   assert(!has_forwarded_objects(), "No forwarded objects on this path");
1413 
1414   if (ShenandoahVerify) {
1415     verifier()->verify_before_concmark();
1416   }
1417 
1418   if (VerifyBeforeGC) {
1419     Universe::verify();
1420   }
1421 
1422   set_concurrent_mark_in_progress(true);
1423   // We need to reset all TLABs because we'd lose marks on all objects allocated in them.
1424   {
1425     ShenandoahGCPhase phase(ShenandoahPhaseTimings::make_parsable);
1426     make_parsable(true);
1427   }
1428 
1429   {
1430     ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
1431     ShenandoahInitMarkUpdateRegionStateClosure cl;
1432     parallel_heap_region_iterate(&cl);
1433   }
1434 
1435   // Make above changes visible to worker threads
1436   OrderAccess::fence();
1437 
1438   concurrent_mark()->mark_roots(ShenandoahPhaseTimings::scan_roots);
1439 
1440   if (UseTLAB) {
1441     ShenandoahGCPhase phase(ShenandoahPhaseTimings::resize_tlabs);
1442     resize_tlabs();
1443   }
1444 
1445   if (ShenandoahPacing) {
1446     pacer()->setup_for_mark();
1447   }
1448 
1449   // Arm nmethods for concurrent marking. When an nmethod is about to be executed,
1450   // we need to make sure that all its metadata are marked. The alternative is to remark
1451   // thread roots at the final mark pause, but that can be a potential latency killer.
1452   if (ShenandoahConcurrentRoots::should_do_concurrent_class_unloading()) {
1453     ShenandoahCodeRoots::arm_nmethods();
1454   }
1455 }
1456 
1457 void ShenandoahHeap::op_mark() {
1458   concurrent_mark()->mark_from_roots();
1459 }
1460 
1461 class ShenandoahFinalMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
1462 private:
1463   ShenandoahMarkingContext* const _ctx;
1464   ShenandoahHeapLock* const _lock;


1522 
1523     parallel_cleaning(false /* full gc*/);
1524 
1525     if (ShenandoahVerify) {
1526       verifier()->verify_roots_no_forwarded();
1527     }
1528 
1529     {
1530       ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_region_states);
1531       ShenandoahFinalMarkUpdateRegionStateClosure cl;
1532       parallel_heap_region_iterate(&cl);
1533 
1534       assert_pinned_region_status();
1535     }
1536 
1537     // Retire the TLABs, which will force threads to reacquire their TLABs after the pause.
1538     // This is needed for two reasons. Strong one: new allocations would be made with the new freeset,
1539     // which would be outside the collection set, so no cset writes would happen there.
1540     // Weaker one: new allocations would happen past the update watermark, so less work would
1541     // be needed for reference updates (the large filler would be updated instead).
1542     {
1543       ShenandoahGCPhase phase(ShenandoahPhaseTimings::retire_tlabs);
1544       make_parsable(true);
1545     }
1546 
1547     {
1548       ShenandoahGCPhase phase(ShenandoahPhaseTimings::choose_cset);
1549       ShenandoahHeapLocker locker(lock());
1550       _collection_set->clear();
1551       heuristics()->choose_collection_set(_collection_set);
1552     }
1553 
1554     {
1555       ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_rebuild_freeset);
1556       ShenandoahHeapLocker locker(lock());
1557       _free_set->rebuild();
1558     }
1559 
1560     if (!is_degenerated_gc_in_progress()) {
1561       prepare_concurrent_roots();
1562       prepare_concurrent_unloading();
1563     }
1564 


1900     pacer()->setup_for_reset();
1901   }
1902   reset_mark_bitmap();
1903 
1904   ShenandoahResetUpdateRegionStateClosure cl;
1905   parallel_heap_region_iterate(&cl);
1906 }
1907 
1908 void ShenandoahHeap::op_preclean() {
1909   if (ShenandoahPacing) {
1910     pacer()->setup_for_preclean();
1911   }
1912   concurrent_mark()->preclean_weak_refs();
1913 }
1914 
1915 void ShenandoahHeap::op_full(GCCause::Cause cause) {
1916   ShenandoahMetricsSnapshot metrics;
1917   metrics.snap_before();
1918 
1919   full_gc()->do_it(cause);
1920   if (UseTLAB) {
1921     ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_resize_tlabs);
1922     resize_all_tlabs();
1923   }
1924 
1925   metrics.snap_after();
1926 
1927   if (metrics.is_good_progress()) {
1928     _progress_last_gc.set();
1929   } else {
1930     // Nothing to do. Tell the allocation path that we have failed to make
1931     // progress, and it can finally fail.
1932     _progress_last_gc.unset();
1933   }
1934 }
1935 
1936 void ShenandoahHeap::op_degenerated(ShenandoahDegenPoint point) {
1937   // Degenerated GC is STW, but it can also fail. The current mechanics communicate
1938   // GC failure via the cancelled_concgc() flag. So, if we detect the failure after
1939   // some phase, we have to upgrade the Degenerated GC to a Full GC.
1940 
1941   clear_cancelled_gc();
1942 
1943   ShenandoahMetricsSnapshot metrics;


2494         _heap->pacer()->report_updaterefs(pointer_delta(update_watermark, r->bottom()));
2495       }
2496       if (_heap->check_cancelled_gc_and_yield(_concurrent)) {
2497         return;
2498       }
2499       r = _regions->next();
2500     }
2501   }
2502 };
2503 
2504 void ShenandoahHeap::update_heap_references(bool concurrent) {
2505   ShenandoahUpdateHeapRefsTask<ShenandoahUpdateHeapRefsClosure> task(&_update_refs_iterator, concurrent);
2506   workers()->run_task(&task);
2507 }
2508 
2509 void ShenandoahHeap::op_init_updaterefs() {
2510   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
2511 
2512   set_evacuation_in_progress(false);
2513 
2514   {
2515     ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_refs_retire_gclabs);
2516     retire_and_reset_gclabs();
2517   }
2518 
2519   if (ShenandoahVerify) {
2520     if (!is_degenerated_gc_in_progress()) {
2521       verifier()->verify_roots_in_to_space_except(ShenandoahRootVerifier::ThreadRoots);
2522     }
2523     verifier()->verify_before_updaterefs();
2524   }
2525 
2526   set_update_refs_in_progress(true);
2527 
2528   _update_refs_iterator.reset();
2529 
2530   if (ShenandoahPacing) {
2531     pacer()->setup_for_updaterefs();
2532   }
2533 }
2534 
2535 class ShenandoahFinalUpdateRefsUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
2536 private:




1027 
1028   oop humongous_obj = oop(start->bottom());
1029   size_t size = humongous_obj->size();
1030   size_t required_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
1031   size_t index = start->index() + required_regions - 1;
1032 
1033   assert(!start->has_live(), "liveness must be zero");
1034 
1035   for (size_t i = 0; i < required_regions; i++) {
1036     // Reclaim from the tail. Otherwise, an assertion fails when printing the region to the trace log,
1037     // as it expects that every region belongs to a humongous range that starts with a humongous start region.
1038     ShenandoahHeapRegion* region = get_region(index--);
1039 
1040     assert(region->is_humongous(), "expect correct humongous start or continuation");
1041     assert(!region->is_cset(), "Humongous region should not be in collection set");
1042 
1043     region->make_trash_immediate();
1044   }
1045 }
1046 
1047 class ShenandoahCheckCleanGCLABClosure : public ThreadClosure {
1048 public:
1049   ShenandoahCheckCleanGCLABClosure() {}
1050   void do_thread(Thread* thread) {
1051     PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1052     assert(gclab != NULL, "GCLAB should be initialized for %s", thread->name());
1053     assert(gclab->words_remaining() == 0, "GCLAB should not need retirement");
1054   }
1055 };
1056 
1057 class ShenandoahRetireGCLABClosure : public ThreadClosure {
1058 private:
1059   bool const _resize;
1060 public:
1061   ShenandoahRetireGCLABClosure(bool resize) : _resize(resize) {}
1062   void do_thread(Thread* thread) {
1063     PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1064     assert(gclab != NULL, "GCLAB should be initialized for %s", thread->name());
1065     gclab->retire();
1066     if (_resize && ShenandoahThreadLocalData::gclab_size(thread) > 0) {
1067       ShenandoahThreadLocalData::set_gclab_size(thread, 0);
1068     }
1069   }
1070 };
1071 
1072 void ShenandoahHeap::labs_make_parsable() {
1073   assert(UseTLAB, "Only call with UseTLAB");
1074 
1075   ShenandoahRetireGCLABClosure cl(false);
1076 
1077   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1078     ThreadLocalAllocBuffer& tlab = t->tlab();
1079     tlab.make_parsable();
1080     cl.do_thread(t);
1081   }
1082 
1083   workers()->threads_do(&cl);
1084 }
1085 
1086 void ShenandoahHeap::tlabs_retire(bool resize) {
1087   assert(UseTLAB, "Only call with UseTLAB");
1088   assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");
1089 
1090   ThreadLocalAllocStats stats;
1091 
1092   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1093     ThreadLocalAllocBuffer& tlab = t->tlab();
1094     tlab.retire(&stats);
1095     if (resize) {
1096       tlab.resize();
1097     }
1098   }
1099 
1100   stats.publish();
1101 
1102 #ifdef ASSERT
1103   ShenandoahCheckCleanGCLABClosure cl;
1104   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1105     cl.do_thread(t);
1106   }
1107   workers()->threads_do(&cl);
1108 #endif
1109 }
1110 
1111 void ShenandoahHeap::gclabs_retire(bool resize) {
1112   assert(UseTLAB, "Only call with UseTLAB");
1113   assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");
1114 
1115   ShenandoahRetireGCLABClosure cl(resize);
1116   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1117     cl.do_thread(t);
1118   }
1119   workers()->threads_do(&cl);
1120 }
1121 
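Taken together, labs_make_parsable(), tlabs_retire() and gclabs_retire() split LAB retirement by kind and by caller intent. A hedged sketch of the intended call sites, mirroring the pauses shown later on this page (the phase names and flags are the ones used there):

    // Init mark: stop mutator TLAB allocation for the marking epoch, optionally resizing.
    if (UseTLAB) {
      ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_manage_tlabs);
      tlabs_retire(ResizeTLAB);
    }

    // Init update-refs: evacuation is over, flush GCLABs and recompute their sizes.
    if (UseTLAB) {
      ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_refs_manage_gclabs);
      gclabs_retire(true);
    }

    // Verification and other safepoint heap walks: only need the heap parsable, no stats.
    if (SafepointSynchronize::is_at_safepoint() && UseTLAB) {
      labs_make_parsable();
    }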
1122 class ShenandoahEvacuateUpdateRootsTask : public AbstractGangTask {
1123 private:
1124   ShenandoahRootEvacuator* _rp;
1125 
1126 public:
1127   ShenandoahEvacuateUpdateRootsTask(ShenandoahRootEvacuator* rp) :
1128     AbstractGangTask("Shenandoah evacuate and update roots"),
1129     _rp(rp) {}
1130 
1131   void work(uint worker_id) {
1132     ShenandoahParallelWorkerSession worker_session(worker_id);
1133     ShenandoahEvacOOMScope oom_evac_scope;
1134     ShenandoahEvacuateUpdateRootsClosure<> cl;
1135     MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);
1136     _rp->roots_do(worker_id, &cl);
1137   }
1138 };
1139 


1156   DerivedPointerTable::update_pointers();
1157 #endif
1158 }
1159 
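ShenandoahEvacuateUpdateRootsTask above is an AbstractGangTask, so it is handed to the worker gang with the same workers()->run_task() pattern that update_heap_references() uses further down this page. A hedged sketch of that dispatch; evacuate_roots_with() is a hypothetical wrapper, not the verbatim body of the elided function:

    // Hypothetical dispatch sketch: given a root evacuator prepared for this pause,
    // wrap it in the gang task and run it across all GC workers.
    static void evacuate_roots_with(ShenandoahHeap* heap, ShenandoahRootEvacuator* rp) {
      ShenandoahEvacuateUpdateRootsTask roots_task(rp);
      heap->workers()->run_task(&roots_task);
    }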
1160 // Returns size in bytes
1161 size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
1162   if (ShenandoahElasticTLAB) {
1163     // With Elastic TLABs, return the max allowed size, and let the allocation path
1164     // figure out the safe size for current allocation.
1165     return ShenandoahHeapRegion::max_tlab_size_bytes();
1166   } else {
1167     return MIN2(_free_set->unsafe_peek_free(), ShenandoahHeapRegion::max_tlab_size_bytes());
1168   }
1169 }
1170 
1171 size_t ShenandoahHeap::max_tlab_size() const {
1172   // Returns size in words
1173   return ShenandoahHeapRegion::max_tlab_size_words();
1174 }
1175 
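The non-elastic branch of unsafe_max_tlab_alloc() above is a simple clamp: report no more than the free set can currently hand out, and never more than the per-region TLAB cap. A small self-contained illustration with made-up numbers:

    #include <algorithm>
    #include <cstddef>

    // Model of the non-elastic branch: min(peeked free space, per-region TLAB maximum).
    static size_t model_unsafe_max_tlab_alloc(size_t peeked_free_bytes, size_t max_tlab_bytes) {
      return std::min(peeked_free_bytes, max_tlab_bytes);
    }

    // Example: with 512 KB peeked free and a 2 MB cap the answer is 512 KB;
    // with 8 MB peeked free it is capped at 2 MB.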
1176 void ShenandoahHeap::collect(GCCause::Cause cause) {
1177   control_thread()->request_gc(cause);
1178 }
1179 
1180 void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {
1181   //assert(false, "Shouldn't need to do full collections");
1182 }
1183 
1184 HeapWord* ShenandoahHeap::block_start(const void* addr) const {
1185   ShenandoahHeapRegion* r = heap_region_containing(addr);
1186   if (r != NULL) {
1187     return r->block_start(addr);
1188   }
1189   return NULL;
1190 }
1191 
1192 bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
1193   ShenandoahHeapRegion* r = heap_region_containing(addr);
1194   return r->block_is_obj(addr);
1195 }
1196 
1197 bool ShenandoahHeap::print_location(outputStream* st, void* addr) const {
1198   return BlockLocationPrinter<ShenandoahHeap>::print_location(st, addr);
1199 }
1200 
1201 jlong ShenandoahHeap::millis_since_last_gc() {
1202   double v = heuristics()->time_since_last_gc() * 1000;
1203   assert(0 <= v && v <= max_jlong, "value should fit: %f", v);
1204   return (jlong)v;
1205 }
1206 
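The conversion above is plain seconds-to-milliseconds scaling followed by a truncating cast, with the assert guarding the jlong range. For instance, assuming the heuristics report 2.5 seconds:

    double since_last_gc_seconds = 2.5;                     // hypothetical input
    jlong millis = (jlong)(since_last_gc_seconds * 1000);   // 2500, truncated toward zero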
1207 void ShenandoahHeap::prepare_for_verify() {
1208   if (SafepointSynchronize::is_at_safepoint() && UseTLAB) {
1209     labs_make_parsable();
1210   }
1211 }
1212 
1213 void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
1214   workers()->threads_do(tcl);
1215   if (_safepoint_workers != NULL) {
1216     _safepoint_workers->threads_do(tcl);
1217   }
1218   if (ShenandoahStringDedup::is_enabled()) {
1219     ShenandoahStringDedup::threads_do(tcl);
1220   }
1221 }
1222 
1223 void ShenandoahHeap::print_tracing_info() const {
1224   LogTarget(Info, gc, stats) lt;
1225   if (lt.is_enabled()) {
1226     ResourceMark rm;
1227     LogStream ls(lt);
1228 
1229     phase_timings()->print_global_on(&ls);


1279 
1280       assert(oopDesc::is_oop(obj), "must be a valid oop");
1281       if (!_bitmap->is_marked(obj)) {
1282         _bitmap->mark(obj);
1283         _oop_stack->push(obj);
1284       }
1285     }
1286   }
1287 public:
1288   ObjectIterateScanRootClosure(MarkBitMap* bitmap, Stack<oop,mtGC>* oop_stack) :
1289     _bitmap(bitmap), _oop_stack(oop_stack), _heap(ShenandoahHeap::heap()),
1290     _marking_context(_heap->marking_context()) {}
1291   void do_oop(oop* p)       { do_oop_work(p); }
1292   void do_oop(narrowOop* p) { do_oop_work(p); }
1293 };
1294 
1295 /*
1296  * This is public API, used in preparation of object_iterate().
1297  * Since we don't do linear scan of heap in object_iterate() (see comment below), we don't
1298  * need to make the heap parsable. For Shenandoah-internal linear heap scans that we can
1299  * control, we call SH::tlabs_retire, SH::gclabs_retire.
1300  */
1301 void ShenandoahHeap::ensure_parsability(bool retire_tlabs) {
1302   // No-op.
1303 }
1304 
1305 /*
1306  * Iterates objects in the heap. This is public API, used for, e.g., heap dumping.
1307  *
1308  * We cannot safely iterate objects by doing a linear scan at random points in time. Linear
1309  * scanning needs to deal with dead objects, which may have dead Klass* pointers (e.g.
1310  * calling oopDesc::size() would crash) or dangling reference fields (crashes) etc. Linear
1311  * scanning therefore depends on having a valid marking bitmap to support it. However, we only
1312  * have a valid marking bitmap after successful marking. In particular, we *don't* have a valid
1313  * marking bitmap during marking, after aborted marking or during/after cleanup (when we just
1314  * wiped the bitmap in preparation for next marking).
1315  *
1316  * For all those reasons, we implement object iteration as a single marking traversal, reporting
1317  * objects as we mark+traverse through the heap, starting from GC roots. JVMTI IterateThroughHeap
1318  * is allowed to report dead objects, but is not required to do so.
1319  */
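The comment above describes object iteration as a single mark-and-traverse pass from the GC roots: a bitmap deduplicates visits and a stack serves as the worklist, exactly the shape of ObjectIterateScanRootClosure earlier. A self-contained model of that traversal over a toy object graph; this illustrates the scheme only and is not the HotSpot implementation:

    #include <cstdio>
    #include <stack>
    #include <unordered_set>
    #include <vector>

    struct Obj {                      // stand-in for a heap object
      int id;
      std::vector<Obj*> fields;       // outgoing references
    };

    // Visit every object reachable from 'roots' exactly once: mark on first sight
    // (the bitmap), push onto the worklist (the oop stack), report when popped.
    static void object_iterate_model(const std::vector<Obj*>& roots) {
      std::unordered_set<Obj*> marked;  // models the aux marking bitmap
      std::stack<Obj*> worklist;        // models Stack<oop, mtGC>

      for (Obj* r : roots) {
        if (r != nullptr && marked.insert(r).second) {
          worklist.push(r);
        }
      }
      while (!worklist.empty()) {
        Obj* obj = worklist.top();
        worklist.pop();
        std::printf("visit object %d\n", obj->id);  // cl->do_object(obj) in the real code
        for (Obj* ref : obj->fields) {
          if (ref != nullptr && marked.insert(ref).second) {
            worklist.push(ref);
          }
        }
      }
    }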


1435   bool is_thread_safe() { return true; }
1436 };
1437 
1438 void ShenandoahHeap::op_init_mark() {
1439   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
1440   assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
1441 
1442   assert(marking_context()->is_bitmap_clear(), "need clear marking bitmap");
1443   assert(!marking_context()->is_complete(), "should not be complete");
1444   assert(!has_forwarded_objects(), "No forwarded objects on this path");
1445 
1446   if (ShenandoahVerify) {
1447     verifier()->verify_before_concmark();
1448   }
1449 
1450   if (VerifyBeforeGC) {
1451     Universe::verify();
1452   }
1453 
1454   set_concurrent_mark_in_progress(true);
1455 
1456   // We need to reset all TLABs because they might be below the TAMS, and we need to mark
1457   // the objects in them. Do not let mutators allocate any new objects in their current TLABs.
1458   // It is also a good place to resize the TLABs for future allocations.
1459   if (UseTLAB) {
1460     ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_manage_tlabs);
1461     tlabs_retire(ResizeTLAB);
1462   }
1463 
1464   {
1465     ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
1466     ShenandoahInitMarkUpdateRegionStateClosure cl;
1467     parallel_heap_region_iterate(&cl);
1468   }
1469 
1470   // Make above changes visible to worker threads
1471   OrderAccess::fence();
1472 
1473   concurrent_mark()->mark_roots(ShenandoahPhaseTimings::scan_roots);
1474 





1475   if (ShenandoahPacing) {
1476     pacer()->setup_for_mark();
1477   }
1478 
1479   // Arm nmethods for concurrent marking. When an nmethod is about to be executed,
1480   // we need to make sure that all its metadata are marked. The alternative is to remark
1481   // thread roots at the final mark pause, but that can be a potential latency killer.
1482   if (ShenandoahConcurrentRoots::should_do_concurrent_class_unloading()) {
1483     ShenandoahCodeRoots::arm_nmethods();
1484   }
1485 }
1486 
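op_init_mark() above brackets each step in a ShenandoahGCPhase, an RAII scope whose constructor starts attributing time to a phase and whose destructor stops it. A self-contained model of that idiom; ScopedPhase is illustrative only, the real ShenandoahGCPhase records into ShenandoahPhaseTimings rather than printing:

    #include <chrono>
    #include <cstdio>

    // Minimal RAII phase timer: time is attributed to the named phase for exactly
    // the lifetime of the local object, mirroring `ShenandoahGCPhase phase(...)` above.
    class ScopedPhase {
      const char* _name;
      std::chrono::steady_clock::time_point _start;
    public:
      explicit ScopedPhase(const char* name)
        : _name(name), _start(std::chrono::steady_clock::now()) {}
      ~ScopedPhase() {
        long long us = std::chrono::duration_cast<std::chrono::microseconds>(
            std::chrono::steady_clock::now() - _start).count();
        std::printf("%s: %lld us\n", _name, us);
      }
    };

    // Usage, shaped like the pause code above:
    // {
    //   ScopedPhase phase("init_update_region_states");
    //   ... work for this step ...
    // }  // timer stops here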
1487 void ShenandoahHeap::op_mark() {
1488   concurrent_mark()->mark_from_roots();
1489 }
1490 
1491 class ShenandoahFinalMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
1492 private:
1493   ShenandoahMarkingContext* const _ctx;
1494   ShenandoahHeapLock* const _lock;


1552 
1553     parallel_cleaning(false /* full gc*/);
1554 
1555     if (ShenandoahVerify) {
1556       verifier()->verify_roots_no_forwarded();
1557     }
1558 
1559     {
1560       ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_region_states);
1561       ShenandoahFinalMarkUpdateRegionStateClosure cl;
1562       parallel_heap_region_iterate(&cl);
1563 
1564       assert_pinned_region_status();
1565     }
1566 
1567     // Retire the TLABs, which will force threads to reacquire their TLABs after the pause.
1568     // This is needed for two reasons. Strong one: new allocations would be made with the new freeset,
1569     // which would be outside the collection set, so no cset writes would happen there.
1570     // Weaker one: new allocations would happen past the update watermark, so less work would
1571     // be needed for reference updates (the large filler would be updated instead).
1572     if (UseTLAB) {
1573       ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_manage_labs);
1574       tlabs_retire(false);
1575     }
1576 
1577     {
1578       ShenandoahGCPhase phase(ShenandoahPhaseTimings::choose_cset);
1579       ShenandoahHeapLocker locker(lock());
1580       _collection_set->clear();
1581       heuristics()->choose_collection_set(_collection_set);
1582     }
1583 
1584     {
1585       ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_rebuild_freeset);
1586       ShenandoahHeapLocker locker(lock());
1587       _free_set->rebuild();
1588     }
1589 
1590     if (!is_degenerated_gc_in_progress()) {
1591       prepare_concurrent_roots();
1592       prepare_concurrent_unloading();
1593     }
1594 


1930     pacer()->setup_for_reset();
1931   }
1932   reset_mark_bitmap();
1933 
1934   ShenandoahResetUpdateRegionStateClosure cl;
1935   parallel_heap_region_iterate(&cl);
1936 }
1937 
1938 void ShenandoahHeap::op_preclean() {
1939   if (ShenandoahPacing) {
1940     pacer()->setup_for_preclean();
1941   }
1942   concurrent_mark()->preclean_weak_refs();
1943 }
1944 
1945 void ShenandoahHeap::op_full(GCCause::Cause cause) {
1946   ShenandoahMetricsSnapshot metrics;
1947   metrics.snap_before();
1948 
1949   full_gc()->do_it(cause);
1950 
1951   metrics.snap_after();
1952 
1953   if (metrics.is_good_progress()) {
1954     _progress_last_gc.set();
1955   } else {
1956     // Nothing to do. Tell the allocation path that we have failed to make
1957     // progress, and it can finally fail.
1958     _progress_last_gc.unset();
1959   }
1960 }
1961 
1962 void ShenandoahHeap::op_degenerated(ShenandoahDegenPoint point) {
1963   // Degenerated GC is STW, but it can also fail. The current mechanics communicate
1964   // GC failure via the cancelled_concgc() flag. So, if we detect the failure after
1965   // some phase, we have to upgrade the Degenerated GC to a Full GC.
1966 
1967   clear_cancelled_gc();
1968 
1969   ShenandoahMetricsSnapshot metrics;


2520         _heap->pacer()->report_updaterefs(pointer_delta(update_watermark, r->bottom()));
2521       }
2522       if (_heap->check_cancelled_gc_and_yield(_concurrent)) {
2523         return;
2524       }
2525       r = _regions->next();
2526     }
2527   }
2528 };
2529 
2530 void ShenandoahHeap::update_heap_references(bool concurrent) {
2531   ShenandoahUpdateHeapRefsTask<ShenandoahUpdateHeapRefsClosure> task(&_update_refs_iterator, concurrent);
2532   workers()->run_task(&task);
2533 }
2534 
2535 void ShenandoahHeap::op_init_updaterefs() {
2536   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
2537 
2538   set_evacuation_in_progress(false);
2539 
2540   // Evacuation is over, no GCLABs are needed anymore. GCLABs are under URWM, so we need to
2541   // make them parsable for update code to work correctly. Plus, we can compute new sizes
2542   // for future GCLABs here.
2543   if (UseTLAB) {
2544     ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_refs_manage_gclabs);
2545     gclabs_retire(true);
2546   }
2547 
2548   if (ShenandoahVerify) {
2549     if (!is_degenerated_gc_in_progress()) {
2550       verifier()->verify_roots_in_to_space_except(ShenandoahRootVerifier::ThreadRoots);
2551     }
2552     verifier()->verify_before_updaterefs();
2553   }
2554 
2555   set_update_refs_in_progress(true);
2556 
2557   _update_refs_iterator.reset();
2558 
2559   if (ShenandoahPacing) {
2560     pacer()->setup_for_updaterefs();
2561   }
2562 }
2563 
2564 class ShenandoahFinalUpdateRefsUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
2565 private:

