1210 }
1211
1212 void ShenandoahHeap::verify(VerifyOption vo) {
1213 if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
1214 if (ShenandoahVerify) {
1215 verifier()->verify_generic(vo);
1216 } else {
1217 // TODO: Consider allocating verification bitmaps on demand,
1218 // and turn this on unconditionally.
1219 }
1220 }
1221 }
1222 size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
1223 return _free_set->capacity();
1224 }
1225
// Oop closure used by ShenandoahHeap::object_iterate(): visits each oop slot,
// resolves the referent's forwardee, and marks + pushes objects that have not
// been visited yet onto the traversal stack.
1226 class ObjectIterateScanRootClosure : public BasicOopIterateClosure {
1227 private:
1228 MarkBitMap* _bitmap;  // visited-set for the traversal (the aux bitmap)
1229 Stack<oop,mtGC>* _oop_stack;  // work stack of discovered, not-yet-scanned objects
1230
// Shared implementation for both wide and narrow oop slots.
1231 template <class T>
1232 void do_oop_work(T* p) {
1233 T o = RawAccess<>::oop_load(p);
1234 if (!CompressedOops::is_null(o)) {
1235 oop obj = CompressedOops::decode_not_null(o);
// Raw, assert-free forwardee read: see the explanation below for why the
// checked resolution path cannot be used here.
1236 oop fwd = (oop) ShenandoahForwarding::get_forwardee_raw_unchecked(obj);
1237 if (fwd == NULL) {
1238 // There is an odd interaction with VM_HeapWalkOperation, see jvmtiTagMap.cpp.
1239 //
1240 // That operation walks the reachable objects on its own, storing the marking
1241 // wavefront in the object marks. When it is done, it calls the CollectedHeap
1242 // to iterate over all objects to clean up the mess. When it reaches here,
1243 // the Shenandoah fwdptr resolution code encounters the marked objects with
1244 // NULL forwardee. Trying to act on that would crash the VM. Or fail the
1245 // asserts, should we go for resolve_forwarded_pointer(obj).
1246 //
1247 // Therefore, we have to dodge it by doing the raw access to forwardee, and
1248 // assuming the object had no forwardee, if that thing is NULL.
1249 } else {
1250 obj = fwd;
1251 }
1252 assert(oopDesc::is_oop(obj), "must be a valid oop");
// First visit: record the object in the bitmap and queue it for field scanning.
1253 if (!_bitmap->is_marked((HeapWord*) obj)) {
1254 _bitmap->mark((HeapWord*) obj);
1255 _oop_stack->push(obj);
1256 }
1257 }
1258 }
1259 public:
1260 ObjectIterateScanRootClosure(MarkBitMap* bitmap, Stack<oop,mtGC>* oop_stack) :
1261 _bitmap(bitmap), _oop_stack(oop_stack) {}
1262 void do_oop(oop* p) { do_oop_work(p); }
1263 void do_oop(narrowOop* p) { do_oop_work(p); }
1264 };
1265
1266 /*
1267 * This is public API, used in preparation of object_iterate().
1268 * Since we don't do linear scan of heap in object_iterate() (see comment below), we don't
1269 * need to make the heap parsable. For Shenandoah-internal linear heap scans that we can
1270 * control, we call SH::make_tlabs_parsable().
1271 */
// Part of the CollectedHeap interface; see the comment above for why this is
// deliberately empty for Shenandoah.
1272 void ShenandoahHeap::ensure_parsability(bool retire_tlabs) {
1273 // No-op. "retire_tlabs" is intentionally ignored: object_iterate() does not
// rely on a linearly parsable heap.
1274 }
1275
1276 /*
1277 * Iterates objects in the heap. This is public API, used for, e.g., heap dumping.
1278 *
1279 * We cannot safely iterate objects by doing a linear scan at random points in time. Linear
1280 * scanning needs to deal with dead objects, which may have dead Klass* pointers (e.g.
1281 * calling oopDesc::size() would crash) or dangling reference fields (crashes) etc.
1286 *
1287 * For all those reasons, we implement object iteration as a single marking traversal, reporting
1288 * objects as we mark+traverse through the heap, starting from GC roots. JVMTI IterateThroughHeap
1289 * is allowed to report dead objects, but is not required to do so.
1290 */
1291 void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
1292 assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
// Commit the auxiliary bitmap backing memory on demand. When the region is
// "special", commit/uncommit is skipped here (presumably the memory is always
// committed -- confirm against the bitmap reservation code). Bail out politely
// if the commit fails.
1293 if (!_aux_bitmap_region_special && !os::commit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), false)) {
1294 log_warning(gc)("Could not commit native memory for auxiliary marking bitmap for heap iteration");
1295 return;
1296 }
1297
1298 // Reset bitmap
1299 _aux_bit_map.clear();
1300
1301 Stack<oop,mtGC> oop_stack;
1302
1303 // First, we process GC roots according to current GC cycle. This populates the work stack with initial objects.
1304 ShenandoahHeapIterationRootScanner rp;
1305 ObjectIterateScanRootClosure oops(&_aux_bit_map, &oop_stack);
1306
1307 // If we are unloading classes right now, we should not touch weak roots,
1308 // on the off-chance we would evacuate them and make them live accidentally.
1309 // In other cases, we have to scan all roots.
1310 if (is_evacuation_in_progress() && unload_classes()) {
1311 rp.strong_roots_do(&oops);
1312 } else {
1313 rp.roots_do(&oops);
1314 }
1315
1316 // Work through the oop stack to traverse heap.
// Depth-first traversal: report each object to the caller's closure, then scan
// its fields to discover (mark + push) more objects.
1317 while (! oop_stack.is_empty()) {
1318 oop obj = oop_stack.pop();
1319 assert(oopDesc::is_oop(obj), "must be a valid oop");
1320 cl->do_object(obj);
1321 obj->oop_iterate(&oops);
1322 }
1323
1324 assert(oop_stack.is_empty(), "should be empty");
1325
// Release the bitmap memory that was committed above; warn (but do not fail)
// if the uncommit does not succeed.
1326 if (!_aux_bitmap_region_special && !os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size())) {
1327 log_warning(gc)("Could not uncommit native memory for auxiliary marking bitmap for heap iteration");
1328 }
1329 }
1330
1331 // Keep alive an object that was loaded with AS_NO_KEEPALIVE.
1332 void ShenandoahHeap::keep_alive(oop obj) {
1333 if (is_concurrent_mark_in_progress()) {
1334 ShenandoahBarrierSet::barrier_set()->enqueue(obj);
2063 MetaspaceGC::compute_new_size();
2064 MetaspaceUtils::verify_metrics();
2065 }
2066
2067 // Weak roots are either pre-evacuated (final mark) or updated (final updaterefs),
2068 // so they should not have forwarded oops.
2069 // However, we do need to "null" dead oops in the roots, if that cannot be done
2070 // in concurrent cycles.
// Clean up weak roots at a stop-the-world pause: drop dead oops, and -- when
// the heap still has forwarded objects -- update the surviving root oops to
// their new locations. Runs as a parallel task on the worker gang.
2071 void ShenandoahHeap::stw_process_weak_roots(bool full_gc) {
2072 ShenandoahGCPhase root_phase(full_gc ?
2073 ShenandoahPhaseTimings::full_gc_purge :
2074 ShenandoahPhaseTimings::purge);
2075 uint num_workers = _workers->active_workers();
2076 ShenandoahPhaseTimings::Phase timing_phase = full_gc ?
2077 ShenandoahPhaseTimings::full_gc_purge_par :
2078 ShenandoahPhaseTimings::purge_par;
2079 // Cleanup weak roots
2080 ShenandoahGCPhase phase(timing_phase);
2081 if (has_forwarded_objects()) {
// Roots may still point at forwarded objects: use the forwarding-aware
// is-alive check, and fix up references while cleaning. Traversal mode uses
// its own update-refs closure.
2082 if (is_traversal_mode()) {
2083 ShenandoahForwardedIsAliveClosure is_alive;
2084 ShenandoahTraversalUpdateRefsClosure keep_alive;
2085 ShenandoahParallelWeakRootsCleaningTask<ShenandoahForwardedIsAliveClosure, ShenandoahTraversalUpdateRefsClosure>
2086 cleaning_task(&is_alive, &keep_alive, num_workers);
2087 _workers->run_task(&cleaning_task);
2088 } else {
2089 ShenandoahForwardedIsAliveClosure is_alive;
2090 ShenandoahUpdateRefsClosure keep_alive;
2091 ShenandoahParallelWeakRootsCleaningTask<ShenandoahForwardedIsAliveClosure, ShenandoahUpdateRefsClosure>
2092 cleaning_task(&is_alive, &keep_alive, num_workers);
2093 _workers->run_task(&cleaning_task);
2094 }
2095 } else {
// No forwarded objects: nothing to update. Debug builds additionally assert
// that no root oop is forwarded; product builds use a do-nothing keep-alive.
2096 ShenandoahIsAliveClosure is_alive;
2097 #ifdef ASSERT
2098 ShenandoahAssertNotForwardedClosure verify_cl;
2099 ShenandoahParallelWeakRootsCleaningTask<ShenandoahIsAliveClosure, ShenandoahAssertNotForwardedClosure>
2100 cleaning_task(&is_alive, &verify_cl, num_workers);
2101 #else
// NOTE(review): do_nothing_cl is presumably the global DoNothingClosure
// declared in memory/iterator.hpp -- confirm it is in scope in product builds.
2102 ShenandoahParallelWeakRootsCleaningTask<ShenandoahIsAliveClosure, DoNothingClosure>
2103 cleaning_task(&is_alive, &do_nothing_cl, num_workers);
2104 #endif
2105 _workers->run_task(&cleaning_task);
2106 }
2107 }
2108
2109 void ShenandoahHeap::parallel_cleaning(bool full_gc) {
2110 assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2111 stw_process_weak_roots(full_gc);
|
1210 }
1211
1212 void ShenandoahHeap::verify(VerifyOption vo) {
1213 if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
1214 if (ShenandoahVerify) {
1215 verifier()->verify_generic(vo);
1216 } else {
1217 // TODO: Consider allocating verification bitmaps on demand,
1218 // and turn this on unconditionally.
1219 }
1220 }
1221 }
1222 size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
1223 return _free_set->capacity();
1224 }
1225
// Oop closure used by ShenandoahHeap::object_iterate(): visits each oop slot,
// filters out dead weak-root oops during concurrent root processing, resolves
// forwardees for collection-set objects, and marks + pushes objects that have
// not been visited yet onto the traversal stack.
1226 class ObjectIterateScanRootClosure : public BasicOopIterateClosure {
1227 private:
1228 MarkBitMap* _bitmap;  // visited-set for the traversal (the aux bitmap)
1229 Stack<oop,mtGC>* _oop_stack;  // work stack of discovered, not-yet-scanned objects
1230 ShenandoahHeap* const _heap;
1231 ShenandoahMarkingContext* const _marking_context;
1232
// Shared implementation for both wide and narrow oop slots.
1233 template <class T>
1234 void do_oop_work(T* p) {
1235 T o = RawAccess<>::oop_load(p);
1236 if (!CompressedOops::is_null(o)) {
1237 oop obj = CompressedOops::decode_not_null(o);
1238
1239 // In concurrent roots phase, there are dead oops in weak roots.
// Skip unmarked (dead) referents rather than touching them.
1240 if (_heap->is_concurrent_root_in_progress() &&
1241 !_marking_context->is_marked(obj)) {
1242 return;
1243 }
1244
1245 // Only oops in collection set have forwarding pointers
1246 if (_heap->in_collection_set(obj)) {
1247 obj = (oop) ShenandoahForwarding::get_forwardee_raw_unchecked(obj);
1248 }
1249
1250 assert(oopDesc::is_oop(obj), "must be a valid oop");
// First visit: record the object in the bitmap and queue it for field scanning.
1251 if (!_bitmap->is_marked((HeapWord*) obj)) {
1252 _bitmap->mark((HeapWord*) obj);
1253 _oop_stack->push(obj);
1254 }
1255 }
1256 }
1257 public:
1258 ObjectIterateScanRootClosure(MarkBitMap* bitmap, Stack<oop,mtGC>* oop_stack) :
1259 _bitmap(bitmap), _oop_stack(oop_stack), _heap(ShenandoahHeap::heap()),
1260 _marking_context(ShenandoahHeap::heap()->marking_context()) {}
1261 void do_oop(oop* p) { do_oop_work(p); }
1262 void do_oop(narrowOop* p) { do_oop_work(p); }
1263 };
1264
1265 /*
1266 * This is public API, used in preparation of object_iterate().
1267 * Since we don't do linear scan of heap in object_iterate() (see comment below), we don't
1268 * need to make the heap parsable. For Shenandoah-internal linear heap scans that we can
1269 * control, we call SH::make_tlabs_parsable().
1270 */
// Part of the CollectedHeap interface; see the comment above for why this is
// deliberately empty for Shenandoah.
1271 void ShenandoahHeap::ensure_parsability(bool retire_tlabs) {
1272 // No-op. "retire_tlabs" is intentionally ignored: object_iterate() does not
// rely on a linearly parsable heap.
1273 }
1274
1275 /*
1276 * Iterates objects in the heap. This is public API, used for, e.g., heap dumping.
1277 *
1278 * We cannot safely iterate objects by doing a linear scan at random points in time. Linear
1279 * scanning needs to deal with dead objects, which may have dead Klass* pointers (e.g.
1280 * calling oopDesc::size() would crash) or dangling reference fields (crashes) etc.
1285 *
1286 * For all those reasons, we implement object iteration as a single marking traversal, reporting
1287 * objects as we mark+traverse through the heap, starting from GC roots. JVMTI IterateThroughHeap
1288 * is allowed to report dead objects, but is not required to do so.
1289 */
1290 void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
1291 assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
// Commit the auxiliary bitmap backing memory on demand. When the region is
// "special", commit/uncommit is skipped here (presumably the memory is always
// committed -- confirm against the bitmap reservation code). Bail out politely
// if the commit fails.
1292 if (!_aux_bitmap_region_special && !os::commit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), false)) {
1293 log_warning(gc)("Could not commit native memory for auxiliary marking bitmap for heap iteration");
1294 return;
1295 }
1296
1297 // Reset bitmap
1298 _aux_bit_map.clear();
1299
1300 Stack<oop,mtGC> oop_stack;
1301
1302 // First, we process GC roots according to current GC cycle. This populates the work stack with initial objects.
1303 ShenandoahHeapIterationRootScanner rp;
1304 ObjectIterateScanRootClosure oops(&_aux_bit_map, &oop_stack);
1305 rp.roots_do(&oops);
1306
1307 // Work through the oop stack to traverse heap.
// Depth-first traversal: report each object to the caller's closure, then scan
// its fields to discover (mark + push) more objects.
1308 while (! oop_stack.is_empty()) {
1309 oop obj = oop_stack.pop();
1310 assert(oopDesc::is_oop(obj), "must be a valid oop");
1311 cl->do_object(obj);
1312 obj->oop_iterate(&oops);
1313 }
1314
1315 assert(oop_stack.is_empty(), "should be empty");
1316
// Release the bitmap memory that was committed above; warn (but do not fail)
// if the uncommit does not succeed.
1317 if (!_aux_bitmap_region_special && !os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size())) {
1318 log_warning(gc)("Could not uncommit native memory for auxiliary marking bitmap for heap iteration");
1319 }
1320 }
1321
1322 // Keep alive an object that was loaded with AS_NO_KEEPALIVE.
1323 void ShenandoahHeap::keep_alive(oop obj) {
1324 if (is_concurrent_mark_in_progress()) {
1325 ShenandoahBarrierSet::barrier_set()->enqueue(obj);
2054 MetaspaceGC::compute_new_size();
2055 MetaspaceUtils::verify_metrics();
2056 }
2057
2058 // Weak roots are either pre-evacuated (final mark) or updated (final updaterefs),
2059 // so they should not have forwarded oops.
2060 // However, we do need to "null" dead oops in the roots, if that cannot be done
2061 // in concurrent cycles.
// Clean up weak roots at a stop-the-world pause: drop dead oops, and -- when
// the heap still has forwarded objects -- update the surviving root oops to
// their new locations. Runs as a parallel task on the worker gang.
2062 void ShenandoahHeap::stw_process_weak_roots(bool full_gc) {
2063 ShenandoahGCPhase root_phase(full_gc ?
2064 ShenandoahPhaseTimings::full_gc_purge :
2065 ShenandoahPhaseTimings::purge);
2066 uint num_workers = _workers->active_workers();
2067 ShenandoahPhaseTimings::Phase timing_phase = full_gc ?
2068 ShenandoahPhaseTimings::full_gc_purge_par :
2069 ShenandoahPhaseTimings::purge_par;
2070 // Cleanup weak roots
2071 ShenandoahGCPhase phase(timing_phase);
2072 if (has_forwarded_objects()) {
// Roots may still point at forwarded objects: use the forwarding-aware
// is-alive check (no cset check variant), and fix up references while
// cleaning. Traversal mode uses its own update-refs closure.
2073 if (is_traversal_mode()) {
2074 ShenandoahForwardedIsAliveNoCSetCheckClosure is_alive;
2075 ShenandoahTraversalUpdateRefsClosure keep_alive;
2076 ShenandoahParallelWeakRootsCleaningTask<ShenandoahForwardedIsAliveNoCSetCheckClosure, ShenandoahTraversalUpdateRefsClosure>
2077 cleaning_task(&is_alive, &keep_alive, num_workers);
2078 _workers->run_task(&cleaning_task);
2079 } else {
2080 ShenandoahForwardedIsAliveNoCSetCheckClosure is_alive;
2081 ShenandoahUpdateRefsClosure keep_alive;
2082 ShenandoahParallelWeakRootsCleaningTask<ShenandoahForwardedIsAliveNoCSetCheckClosure, ShenandoahUpdateRefsClosure>
2083 cleaning_task(&is_alive, &keep_alive, num_workers);
2084 _workers->run_task(&cleaning_task);
2085 }
2086 } else {
// No forwarded objects: nothing to update. Debug builds additionally assert
// that no root oop is forwarded; product builds use a do-nothing keep-alive.
2087 ShenandoahIsAliveClosure is_alive;
2088 #ifdef ASSERT
2089 ShenandoahAssertNotForwardedClosure verify_cl;
2090 ShenandoahParallelWeakRootsCleaningTask<ShenandoahIsAliveClosure, ShenandoahAssertNotForwardedClosure>
2091 cleaning_task(&is_alive, &verify_cl, num_workers);
2092 #else
// NOTE(review): do_nothing_cl is presumably the global DoNothingClosure
// declared in memory/iterator.hpp -- confirm it is in scope in product builds.
2093 ShenandoahParallelWeakRootsCleaningTask<ShenandoahIsAliveClosure, DoNothingClosure>
2094 cleaning_task(&is_alive, &do_nothing_cl, num_workers);
2095 #endif
2096 _workers->run_task(&cleaning_task);
2097 }
2098 }
2099
2100 void ShenandoahHeap::parallel_cleaning(bool full_gc) {
2101 assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2102 stw_process_weak_roots(full_gc);
|