1213 }
1214
1215 void ShenandoahHeap::verify(VerifyOption vo) {
1216 if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
1217 if (ShenandoahVerify) {
1218 verifier()->verify_generic(vo);
1219 } else {
1220 // TODO: Consider allocating verification bitmaps on demand,
1221 // and turn this on unconditionally.
1222 }
1223 }
1224 }
1225 size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
1226 return _free_set->capacity();
1227 }
1228
1229 class ObjectIterateScanRootClosure : public BasicOopIterateClosure {
1230 private:
1231 MarkBitMap* _bitmap;
1232 Stack<oop,mtGC>* _oop_stack;
1233
1234 template <class T>
1235 void do_oop_work(T* p) {
1236 T o = RawAccess<>::oop_load(p);
1237 if (!CompressedOops::is_null(o)) {
1238 oop obj = CompressedOops::decode_not_null(o);
1239 oop fwd = (oop) ShenandoahForwarding::get_forwardee_raw_unchecked(obj);
1240 if (fwd == NULL) {
1241 // There is an odd interaction with VM_HeapWalkOperation, see jvmtiTagMap.cpp.
1242 //
1243 // That operation walks the reachable objects on its own, storing the marking
1244 // wavefront in the object marks. When it is done, it calls the CollectedHeap
1245 // to iterate over all objects to clean up the mess. When it reaches here,
1246 // the Shenandoah fwdptr resolution code encounters the marked objects with
1247 // NULL forwardee. Trying to act on that would crash the VM. Or fail the
1248 // asserts, should we go for resolve_forwarded_pointer(obj).
1249 //
1250 // Therefore, we have to dodge it by doing the raw access to forwardee, and
1251 // assuming the object had no forwardee, if that thing is NULL.
1252 } else {
1253 obj = fwd;
1254 }
1255 assert(oopDesc::is_oop(obj), "must be a valid oop");
1256 if (!_bitmap->is_marked(obj)) {
1257 _bitmap->mark(obj);
1258 _oop_stack->push(obj);
1259 }
1260 }
1261 }
1262 public:
1263 ObjectIterateScanRootClosure(MarkBitMap* bitmap, Stack<oop,mtGC>* oop_stack) :
1264 _bitmap(bitmap), _oop_stack(oop_stack) {}
1265 void do_oop(oop* p) { do_oop_work(p); }
1266 void do_oop(narrowOop* p) { do_oop_work(p); }
1267 };
1268
1269 /*
1270 * This is public API, used in preparation of object_iterate().
1271 * Since we don't do linear scan of heap in object_iterate() (see comment below), we don't
1272 * need to make the heap parsable. For Shenandoah-internal linear heap scans that we can
1273 * control, we call SH::make_tlabs_parsable().
1274 */
1275 void ShenandoahHeap::ensure_parsability(bool retire_tlabs) {
1276 // No-op.
1277 }
1278
1279 /*
1280 * Iterates objects in the heap. This is public API, used for, e.g., heap dumping.
1281 *
1282 * We cannot safely iterate objects by doing a linear scan at random points in time. Linear
1283 * scanning needs to deal with dead objects, which may have dead Klass* pointers (e.g.
1284 * calling oopDesc::size() would crash) or dangling reference fields (crashes) etc. Linear
1290 * For all those reasons, we implement object iteration as a single marking traversal, reporting
1291 * objects as we mark+traverse through the heap, starting from GC roots. JVMTI IterateThroughHeap
1292 * is allowed to report dead objects, but is not required to do so.
1293 */
1294 void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
1295 assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1296 if (!_aux_bitmap_region_special && !os::commit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), false)) {
1297 log_warning(gc)("Could not commit native memory for auxiliary marking bitmap for heap iteration");
1298 return;
1299 }
1300
1301 // Reset bitmap
1302 _aux_bit_map.clear();
1303
1304 Stack<oop,mtGC> oop_stack;
1305
1306 // First, we process GC roots according to current GC cycle. This populates the work stack with initial objects.
1307 ShenandoahHeapIterationRootScanner rp;
1308 ObjectIterateScanRootClosure oops(&_aux_bit_map, &oop_stack);
1309
1310 // When concurrent root is in progress, weak roots may contain dead oops, they should not be used
1311 // for root scanning.
1312 if (is_concurrent_root_in_progress()) {
1313 rp.strong_roots_do(&oops);
1314 } else {
1315 rp.roots_do(&oops);
1316 }
1317
1318 // Work through the oop stack to traverse heap.
1319 while (! oop_stack.is_empty()) {
1320 oop obj = oop_stack.pop();
1321 assert(oopDesc::is_oop(obj), "must be a valid oop");
1322 cl->do_object(obj);
1323 obj->oop_iterate(&oops);
1324 }
1325
1326 assert(oop_stack.is_empty(), "should be empty");
1327
1328 if (!_aux_bitmap_region_special && !os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size())) {
1329 log_warning(gc)("Could not uncommit native memory for auxiliary marking bitmap for heap iteration");
1330 }
1331 }
1332
1333 // Keep alive an object that was loaded with AS_NO_KEEPALIVE.
1334 void ShenandoahHeap::keep_alive(oop obj) {
1335 if (is_concurrent_mark_in_progress()) {
1336 ShenandoahBarrierSet::barrier_set()->enqueue(obj);
|
1213 }
1214
// Heap verification entry point (CollectedHeap API). Only acts at a
// Shenandoah safepoint and only when ShenandoahVerify is enabled, since the
// verifier's bitmaps are not allocated otherwise.
void ShenandoahHeap::verify(VerifyOption vo) {
  if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
    if (ShenandoahVerify) {
      verifier()->verify_generic(vo);
    } else {
      // TODO: Consider allocating verification bitmaps on demand,
      // and turn this on unconditionally.
    }
  }
}
// TLAB capacity (CollectedHeap API). Capacity is not tracked per thread;
// every thread is reported the capacity of the shared free set.
size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
  return _free_set->capacity();
}
1228
// Scanning closure for object_iterate(): for every non-null reference seen,
// resolves the object through its forwarding pointer, marks it in the
// auxiliary bitmap, and pushes newly discovered objects on the work stack.
class ObjectIterateScanRootClosure : public BasicOopIterateClosure {
private:
  MarkBitMap* _bitmap;                              // aux bitmap: "already visited" set
  Stack<oop,mtGC>* _oop_stack;                      // traversal work stack
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _marking_context; // liveness info for concurrent root phase

  // Visit one reference slot: decode, skip dead weak-root oops during the
  // concurrent root phase, resolve forwarding, then mark + push if new.
  template <class T>
  void do_oop_work(T* p) {
    T o = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(o)) {
      oop obj = CompressedOops::decode_not_null(o);
      if (_heap->is_concurrent_root_in_progress() && !_marking_context->is_marked(obj)) {
        // There may be dead oops in weak roots in concurrent root phase, do not touch them.
        return;
      }
      obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);

      assert(oopDesc::is_oop(obj), "must be a valid oop");
      if (!_bitmap->is_marked(obj)) {
        _bitmap->mark(obj);
        _oop_stack->push(obj);
      }
    }
  }
public:
  ObjectIterateScanRootClosure(MarkBitMap* bitmap, Stack<oop,mtGC>* oop_stack) :
    _bitmap(bitmap), _oop_stack(oop_stack), _heap(ShenandoahHeap::heap()),
    _marking_context(_heap->marking_context()) {}
  void do_oop(oop* p) { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
};
1261
/*
 * This is public API, used in preparation of object_iterate().
 * Since we don't do linear scan of heap in object_iterate() (see comment below), we don't
 * need to make the heap parsable. For Shenandoah-internal linear heap scans that we can
 * control, we call SH::make_tlabs_parsable().
 */
void ShenandoahHeap::ensure_parsability(bool retire_tlabs) {
  // No-op: nothing to retire, see the rationale above.
}
1271
/*
 * Iterates objects in the heap. This is public API, used for, e.g., heap dumping.
 *
 * We cannot safely iterate objects by doing a linear scan at random points in time. Linear
 * scanning needs to deal with dead objects, which may have dead Klass* pointers (e.g.
 * calling oopDesc::size() would crash) or dangling reference fields (crashes) etc. Linear
 * scanning at arbitrary points is therefore not safe.
 * [NOTE(review): the original comment was truncated here during extraction —
 * restore the missing lines from upstream.]
 *
 * For all those reasons, we implement object iteration as a single marking traversal, reporting
 * objects as we mark+traverse through the heap, starting from GC roots. JVMTI IterateThroughHeap
 * is allowed to report dead objects, but is not required to do so.
 */
void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
  assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
  // The traversal records visited objects in the auxiliary bitmap; commit its
  // backing memory first (unless it is always-committed "special" memory).
  if (!_aux_bitmap_region_special && !os::commit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), false)) {
    log_warning(gc)("Could not commit native memory for auxiliary marking bitmap for heap iteration");
    return;
  }

  // Reset bitmap
  _aux_bit_map.clear();

  Stack<oop,mtGC> oop_stack;

  // First, we process GC roots according to current GC cycle. This populates the work stack with initial objects.
  // All roots can be scanned here because the closure itself skips dead weak-root
  // oops while concurrent root processing is in progress (see the closure above).
  ShenandoahHeapIterationRootScanner rp;
  ObjectIterateScanRootClosure oops(&_aux_bit_map, &oop_stack);

  rp.roots_do(&oops);

  // Work through the oop stack to traverse heap: report each object, then
  // push its not-yet-visited referents.
  while (! oop_stack.is_empty()) {
    oop obj = oop_stack.pop();
    assert(oopDesc::is_oop(obj), "must be a valid oop");
    cl->do_object(obj);
    obj->oop_iterate(&oops);
  }

  assert(oop_stack.is_empty(), "should be empty");

  // Hand the bitmap memory back to the OS if we committed it above.
  if (!_aux_bitmap_region_special && !os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size())) {
    log_warning(gc)("Could not uncommit native memory for auxiliary marking bitmap for heap iteration");
  }
}
1319
1320 // Keep alive an object that was loaded with AS_NO_KEEPALIVE.
1321 void ShenandoahHeap::keep_alive(oop obj) {
1322 if (is_concurrent_mark_in_progress()) {
1323 ShenandoahBarrierSet::barrier_set()->enqueue(obj);
|