--- old/src/hotspot/share/gc/g1/g1ConcurrentMark.inline.hpp	2020-01-22 10:29:39.535637768 +0100
+++ new/src/hotspot/share/gc/g1/g1ConcurrentMark.inline.hpp	2020-01-22 10:29:39.071630175 +0100
@@ -266,6 +266,8 @@
   if (obj == NULL) {
     return false;
   }
+  assert_object_is_in_heap(p, obj);
+
   return make_reference_grey(obj);
 }
--- old/src/hotspot/share/gc/g1/g1FullGCMarker.inline.hpp	2020-01-22 10:29:40.247649421 +0100
+++ new/src/hotspot/share/gc/g1/g1FullGCMarker.inline.hpp	2020-01-22 10:29:39.703640517 +0100
@@ -67,6 +67,8 @@
   T heap_oop = RawAccess<>::oop_load(p);
   if (!CompressedOops::is_null(heap_oop)) {
     oop obj = CompressedOops::decode_not_null(heap_oop);
+    assert_object_is_in_heap(p, obj);
+
     if (mark_object(obj)) {
       _oop_stack.push(obj);
       assert(_bitmap->is_marked(obj), "Must be marked now - map self");
--- old/src/hotspot/share/gc/g1/g1HeapVerifier.cpp	2020-01-22 10:29:40.951660940 +0100
+++ new/src/hotspot/share/gc/g1/g1HeapVerifier.cpp	2020-01-22 10:29:40.427652365 +0100
@@ -192,6 +192,7 @@
   template <class T> void do_oop_work(T *p) {
     oop obj = RawAccess<>::oop_load(p);
+    assert_object_is_in_heap_or_null(p, obj);
     guarantee(obj == NULL || !_g1h->is_obj_dead_cond(obj, _vo),
               "Dead object referenced by a not dead object");
   }
@@ -246,6 +247,7 @@
   template <class T> void do_oop_work(T *p) {
     oop obj = RawAccess<>::oop_load(p);
+    assert_object_is_in_heap_or_null(p, obj);

     if (_hr->is_open_archive()) {
       guarantee(obj == NULL || G1ArchiveAllocator::is_archived_object(obj),
--- old/src/hotspot/share/gc/g1/g1OopClosures.inline.hpp	2020-01-22 10:29:41.671672723 +0100
+++ new/src/hotspot/share/gc/g1/g1OopClosures.inline.hpp	2020-01-22 10:29:41.131663886 +0100
@@ -82,6 +82,8 @@
     return;
   }
   oop obj = CompressedOops::decode_not_null(heap_oop);
+  assert_object_is_in_heap(p, obj);
+
   const G1HeapRegionAttr region_attr = _g1h->region_attr(obj);
   if (region_attr.is_in_cset()) {
     prefetch_and_push(p, obj);
@@ -107,6 +109,8 @@
     return;
   }
   oop obj = CompressedOops::decode_not_null(heap_oop);
+  assert_object_is_in_heap(p, obj);
+
   _cm->mark_in_next_bitmap(_worker_id, obj);
 }
@@ -118,6 +122,7 @@
   // assert(oopDesc::is_oop_or_null(obj), "expected an oop");
   assert(is_object_aligned(obj), "oop must be aligned");
   assert(g1h->is_in_reserved(obj), "oop must be in reserved");
+  assert_object_is_in_heap(p, obj);

   HeapRegion* from = g1h->heap_region_containing(p);
@@ -264,6 +269,7 @@
   if (obj == NULL) {
     return;
   }
+  assert_object_is_in_heap(p, obj);

   if (HeapRegion::is_in_same_region(p, obj)) {
     return;
--- old/src/hotspot/share/gc/g1/heapRegion.cpp	2020-01-22 10:29:42.307683131 +0100
+++ new/src/hotspot/share/gc/g1/heapRegion.cpp	2020-01-22 10:29:41.839675473 +0100
@@ -521,9 +521,6 @@
     obj->print_on(out);
 #endif // PRODUCT
   }
-
-  // This closure provides its own oop verification code.
-  debug_only(virtual bool should_verify_oops() { return false; })
 };

 class VerifyLiveClosure : public G1VerificationClosure {
@@ -660,9 +657,6 @@
   }
   virtual inline void do_oop(oop* p) { do_oop_work(p); }
   virtual inline void do_oop(narrowOop* p) { do_oop_work(p); }
-
-  // This closure provides its own oop verification code.
-  debug_only(virtual bool should_verify_oops() { return false; })
 };

 void HeapRegion::verify(VerifyOption vo,
--- old/src/hotspot/share/gc/parallel/psCardTable.cpp	2020-01-22 10:29:43.023694849 +0100
+++ new/src/hotspot/share/gc/parallel/psCardTable.cpp	2020-01-22 10:29:42.487686077 +0100
@@ -46,6 +46,8 @@
  protected:
   template <class T> void do_oop_work(T* p) {
     oop obj = RawAccess<>::oop_load(p);
+    assert_object_is_in_heap_or_null(p, obj);
+
     if (_young_gen->is_in_reserved(obj) &&
         !_card_table->addr_is_marked_imprecise(p)) {
       // Don't overwrite the first missing card mark
@@ -103,6 +105,8 @@
  protected:
   template <class T> void do_oop_work(T* p) {
     oop obj = RawAccess<IS_NOT_NULL>::oop_load(p);
+    assert_object_is_in_heap_or_null(p, obj);
+
     if (_young_gen->is_in_reserved(obj)) {
       assert(_card_table->addr_is_marked_precise(p), "Found unmarked precise oop");
       _card_table->set_card_newgen(p);
--- old/src/hotspot/share/gc/parallel/psCompactionManager.inline.hpp	2020-01-22 10:29:43.683705649 +0100
+++ new/src/hotspot/share/gc/parallel/psCompactionManager.inline.hpp	2020-01-22 10:29:43.215697991 +0100
@@ -47,9 +47,6 @@
   template <typename T> void do_oop_nv(T* p)      { _compaction_manager->mark_and_push(p); }
   virtual void do_oop(oop* p)                     { do_oop_nv(p); }
   virtual void do_oop(narrowOop* p)               { do_oop_nv(p); }
-
-  // This closure provides its own oop verification code.
-  debug_only(virtual bool should_verify_oops() { return false; })
 };

 class PCIterateMarkAndPushClosure: public MetadataVisitingOopIterateClosure {
@@ -64,9 +61,6 @@
   void do_klass_nv(Klass* k)           { _compaction_manager->follow_klass(k); }
   void do_cld_nv(ClassLoaderData* cld) { _compaction_manager->follow_class_loader(cld); }
-
-  // This closure provides its own oop verification code.
-  debug_only(virtual bool should_verify_oops() { return false; })
 };

 inline bool ParCompactionManager::steal(int queue_num, oop& t) {
--- old/src/hotspot/share/gc/parallel/psParallelCompact.inline.hpp	2020-01-22 10:29:44.379717040 +0100
+++ new/src/hotspot/share/gc/parallel/psParallelCompact.inline.hpp	2020-01-22 10:29:43.855708464 +0100
@@ -134,8 +134,6 @@
   virtual void do_oop(oop* p)       { do_oop_nv(p); }
   virtual void do_oop(narrowOop* p) { do_oop_nv(p); }

-  // This closure provides its own oop verification code.
-  debug_only(virtual bool should_verify_oops() { return false; })
   virtual ReferenceIterationMode reference_iteration_mode() { return DO_FIELDS; }
 private:
   ParCompactionManager* _cm;
--- old/src/hotspot/share/gc/parallel/psPromotionManager.inline.hpp	2020-01-22 10:29:45.067728298 +0100
+++ new/src/hotspot/share/gc/parallel/psPromotionManager.inline.hpp	2020-01-22 10:29:44.551719854 +0100
@@ -113,9 +113,6 @@
   virtual void do_oop(oop* p)       { do_oop_nv(p); }
   virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
-
-  // Don't use the oop verification code in the oop_oop_iterate framework.
-  debug_only(virtual bool should_verify_oops() { return false; })
 };

 //
--- old/src/hotspot/share/gc/serial/defNewGeneration.inline.hpp	2020-01-22 10:29:45.755739557 +0100
+++ new/src/hotspot/share/gc/serial/defNewGeneration.inline.hpp	2020-01-22 10:29:45.235731047 +0100
@@ -45,7 +45,7 @@
   }
 #endif // ASSERT

-  Devirtualizer::do_oop_no_verify(_cl, p);
+  Devirtualizer::do_oop(_cl, p);

   // Card marking is trickier for weak refs.
   // This oop is a 'next' field which was filled in while we
@@ -77,7 +77,7 @@
   }
 #endif // ASSERT

-  Devirtualizer::do_oop_no_verify(_cl, p);
+  Devirtualizer::do_oop(_cl, p);

   // Optimized for Defnew generation if it's the youngest generation:
   // we set a younger_gen card if we have an older->youngest
--- old/src/hotspot/share/gc/serial/markSweep.cpp	2020-01-22 10:29:46.455751013 +0100
+++ new/src/hotspot/share/gc/serial/markSweep.cpp	2020-01-22 10:29:45.923742307 +0100
@@ -132,6 +132,8 @@
   T heap_oop = RawAccess<>::oop_load(p);
   if (!CompressedOops::is_null(heap_oop)) {
     oop obj = CompressedOops::decode_not_null(heap_oop);
+    assert_object_is_in_heap(p, obj);
+
     if (!obj->mark_raw().is_marked()) {
       mark_object(obj);
       follow_object(obj);
--- old/src/hotspot/share/gc/serial/markSweep.hpp	2020-01-22 10:29:47.143762272 +0100
+++ new/src/hotspot/share/gc/serial/markSweep.hpp	2020-01-22 10:29:46.619753696 +0100
@@ -191,9 +191,6 @@
   virtual void do_oop(oop* p);
   virtual void do_oop(narrowOop* p);
   virtual ReferenceIterationMode reference_iteration_mode() { return DO_FIELDS; }
-
-  // This closure provides its own oop verification code.
-  debug_only(virtual bool should_verify_oops() { return false; })
 };

 class PreservedMark {
--- old/src/hotspot/share/gc/serial/markSweep.inline.hpp	2020-01-22 10:29:47.867774121 +0100
+++ new/src/hotspot/share/gc/serial/markSweep.inline.hpp	2020-01-22 10:29:47.319765152 +0100
@@ -50,6 +50,8 @@
   T heap_oop = RawAccess<>::oop_load(p);
   if (!CompressedOops::is_null(heap_oop)) {
     oop obj = CompressedOops::decode_not_null(heap_oop);
+    assert_object_is_in_heap(p, obj);
+
     if (!obj->mark_raw().is_marked()) {
       mark_object(obj);
       _marking_stack.push(obj);
--- old/src/hotspot/share/gc/shared/cardTableRS.cpp	2020-01-22 10:29:48.495784397 +0100
+++ new/src/hotspot/share/gc/shared/cardTableRS.cpp	2020-01-22 10:29:48.031776804 +0100
@@ -343,6 +343,7 @@
             "[_begin, _end) = [" PTR_FORMAT "," PTR_FORMAT ")",
             p2i(jp), p2i(_begin), p2i(_end));
   oop obj = RawAccess<>::oop_load(p);
+  assert_object_is_in_heap_or_null(p, obj);
   guarantee(obj == NULL || (HeapWord*)obj >= _boundary,
             "pointer " PTR_FORMAT " at " PTR_FORMAT " on "
             "clean card crosses boundary" PTR_FORMAT,
--- old/src/hotspot/share/gc/shared/collectedHeap.hpp	2020-01-22 10:29:49.255796834 +0100
+++ new/src/hotspot/share/gc/shared/collectedHeap.hpp	2020-01-22 10:29:48.671787277 +0100
@@ -534,4 +534,12 @@
   }
 };

+#define assert_object_is_in_heap(oop_addr, obj)           \
+  assert(Universe::heap()->is_in(obj),                    \
+         "object not in heap " PTR_FORMAT " " PTR_FORMAT, \
+         p2i(oop_addr), p2i(obj))
+
+#define assert_object_is_in_heap_or_null(oop_addr, obj) \
+  debug_only(if (obj != NULL)) assert_object_is_in_heap(oop_addr, obj)
+
 #endif // SHARE_GC_SHARED_COLLECTEDHEAP_HPP
--- old/src/hotspot/share/gc/shared/genOopClosures.inline.hpp	2020-01-22 10:29:49.911807570 +0100
+++ new/src/hotspot/share/gc/shared/genOopClosures.inline.hpp	2020-01-22 10:29:49.439799846 +0100
@@ -118,6 +118,7 @@
   // Should we copy the obj?
   if (!CompressedOops::is_null(heap_oop)) {
     oop obj = CompressedOops::decode_not_null(heap_oop);
+    assert_object_is_in_heap(p, obj);
     if ((HeapWord*)obj < _boundary) {
       assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?");
       oop new_obj = obj->is_forwarded() ? obj->forwardee()
--- old/src/hotspot/share/gc/shenandoah/shenandoahBarrierSetClone.inline.hpp	2020-01-22 10:29:50.539817848 +0100
+++ new/src/hotspot/share/gc/shenandoah/shenandoahBarrierSetClone.inline.hpp	2020-01-22 10:29:50.075810254 +0100
@@ -46,6 +46,8 @@
     T o = RawAccess<>::oop_load(p);
     if (!CompressedOops::is_null(o)) {
       oop obj = CompressedOops::decode_not_null(o);
+      assert_object_is_in_heap(p, obj);
+
       if (_cset->is_in((HeapWord *)obj)) {
         oop fwd = _bs->resolve_forwarded_not_null(obj);
         if (EVAC && obj == fwd) {
--- old/src/hotspot/share/gc/shenandoah/shenandoahClosures.inline.hpp	2020-01-22 10:29:51.171828189 +0100
+++ new/src/hotspot/share/gc/shenandoah/shenandoahClosures.inline.hpp	2020-01-22 10:29:50.703820531 +0100
@@ -117,6 +117,8 @@
   T o = RawAccess<>::oop_load(p);
   if (! CompressedOops::is_null(o)) {
     oop obj = CompressedOops::decode_not_null(o);
+    assert_object_is_in_heap(p, obj);
+
     if (_heap->in_collection_set(obj)) {
       assert(_heap->is_evacuation_in_progress(), "Only do this when evacuation is in progress");
       shenandoah_assert_marked(p, obj);
--- old/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp	2020-01-22 10:29:51.871839645 +0100
+++ new/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp	2020-01-22 10:29:51.335830874 +0100
@@ -1235,6 +1235,8 @@
     T o = RawAccess<>::oop_load(p);
     if (!CompressedOops::is_null(o)) {
       oop obj = CompressedOops::decode_not_null(o);
+      assert_object_is_in_heap(p, obj);
+
       oop fwd = (oop) ShenandoahForwarding::get_forwardee_raw_unchecked(obj);
       if (fwd == NULL) {
         // There is an odd interaction with VM_HeapWalkOperation, see jvmtiTagMap.cpp.
--- old/src/hotspot/share/gc/shenandoah/shenandoahMarkCompact.cpp	2020-01-22 10:29:52.631852082 +0100
+++ new/src/hotspot/share/gc/shenandoah/shenandoahMarkCompact.cpp	2020-01-22 10:29:52.099843376 +0100
@@ -555,6 +555,7 @@
     T o = RawAccess<>::oop_load(p);
     if (!CompressedOops::is_null(o)) {
       oop obj = CompressedOops::decode_not_null(o);
+      assert_object_is_in_heap(p, obj);
       assert(_ctx->is_marked(obj), "must be marked");
       if (obj->is_forwarded()) {
         oop forw = obj->forwardee();
--- old/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp	2020-01-22 10:29:53.271862556 +0100
+++ new/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp	2020-01-22 10:29:52.807854962 +0100
@@ -81,6 +81,7 @@
     T o = RawAccess<>::oop_load(p);
     if (!CompressedOops::is_null(o)) {
       oop obj = CompressedOops::decode_not_null(o);
+      assert_object_is_in_heap(p, obj);

       // Single threaded verification can use faster non-atomic stack and bitmap
       // methods.
--- old/src/hotspot/share/gc/z/zHeapIterator.cpp	2020-01-22 10:29:53.907872964 +0100
+++ new/src/hotspot/share/gc/z/zHeapIterator.cpp	2020-01-22 10:29:53.443865370 +0100
@@ -117,12 +117,6 @@
   virtual void do_oop(narrowOop* p) {
     ShouldNotReachHere();
   }
-
-#ifdef ASSERT
-  virtual bool should_verify_oops() {
-    return false;
-  }
-#endif
 };

 ZHeapIterator::ZHeapIterator() :
--- old/src/hotspot/share/gc/z/zOopClosures.hpp	2020-01-22 10:29:54.591884157 +0100
+++ new/src/hotspot/share/gc/z/zOopClosures.hpp	2020-01-22 10:29:54.071875647 +0100
@@ -31,12 +31,6 @@
 public:
   virtual void do_oop(oop* p);
   virtual void do_oop(narrowOop* p);
-
-#ifdef ASSERT
-  virtual bool should_verify_oops() {
-    return false;
-  }
-#endif
 };

 class ZNMethodOopClosure : public OopClosure {
@@ -52,12 +46,6 @@
   virtual void do_oop(oop* p);
   virtual void do_oop(narrowOop* p);
-
-#ifdef ASSERT
-  virtual bool should_verify_oops() {
-    return false;
-  }
-#endif
 };

 class ZPhantomIsAliveObjectClosure : public BoolObjectClosure {
--- old/src/hotspot/share/gc/z/zVerify.cpp	2020-01-22 10:29:55.271895285 +0100
+++ new/src/hotspot/share/gc/z/zVerify.cpp	2020-01-22 10:29:54.751886776 +0100
@@ -91,13 +91,6 @@
   virtual ReferenceIterationMode reference_iteration_mode() {
     return _verify_weaks ? DO_FIELDS : DO_FIELDS_EXCEPT_REFERENT;
   }
-
-#ifdef ASSERT
-  // Verification handled by the closure itself
-  virtual bool should_verify_oops() {
-    return false;
-  }
-#endif
 };

 template <typename RootsIterator>
--- old/src/hotspot/share/jfr/leakprofiler/chains/bfsClosure.hpp	2020-01-22 10:29:55.915905824 +0100
+++ new/src/hotspot/share/jfr/leakprofiler/chains/bfsClosure.hpp	2020-01-22 10:29:55.443898100 +0100
@@ -64,7 +64,6 @@
 public:
   virtual ReferenceIterationMode reference_iteration_mode() { return DO_FIELDS_EXCEPT_REFERENT; }
-  virtual bool should_verify_oops() { return false; }

   BFSClosure(EdgeQueue* edge_queue, EdgeStore* edge_store, BitSet* mark_bits);
   void process();
--- old/src/hotspot/share/jfr/leakprofiler/chains/dfsClosure.hpp	2020-01-22 10:29:56.627917477 +0100
+++ new/src/hotspot/share/jfr/leakprofiler/chains/dfsClosure.hpp	2020-01-22 10:29:56.083908574 +0100
@@ -56,7 +56,6 @@
 public:
   virtual ReferenceIterationMode reference_iteration_mode() { return DO_FIELDS_EXCEPT_REFERENT; }
-  virtual bool should_verify_oops() { return false; }

   static void find_leaks_from_edge(EdgeStore* edge_store, BitSet* mark_bits, const Edge* start_edge);
   static void find_leaks_from_root_set(EdgeStore* edge_store, BitSet* mark_bits);
--- old/src/hotspot/share/memory/heapShared.cpp	2020-01-22 10:29:57.255927753 +0100
+++ new/src/hotspot/share/memory/heapShared.cpp	2020-01-22 10:29:56.787920094 +0100
@@ -549,6 +549,7 @@
   template <class T> void do_oop_work(T *p) {
     oop obj = RawAccess<>::oop_load(p);
     if (!CompressedOops::is_null(obj)) {
+      assert_object_is_in_heap(p, obj);
       assert(!HeapShared::is_archived_object(obj),
              "original objects must not point to archived objects");
@@ -773,6 +774,8 @@
   template <class T> void do_oop_work(T *p) {
     oop obj = RawAccess<>::oop_load(p);
     if (!CompressedOops::is_null(obj)) {
+      assert_object_is_in_heap(p, obj);
+
       HeapShared::verify_reachable_objects_from(obj, _is_archived);
     }
   }
@@ -996,9 +999,6 @@
   FindEmbeddedNonNullPointers(narrowOop* start, BitMap* oopmap)
     : _start(start), _oopmap(oopmap), _num_total_oops(0), _num_null_oops(0) {}
-  virtual bool should_verify_oops(void) {
-    return false;
-  }
   virtual void do_oop(narrowOop* p) {
     _num_total_oops ++;
     narrowOop v = *p;
--- old/src/hotspot/share/memory/iterator.hpp	2020-01-22 10:29:57.959939274 +0100
+++ new/src/hotspot/share/memory/iterator.hpp	2020-01-22 10:29:57.435930699 +0100
@@ -102,14 +102,6 @@
   virtual bool do_metadata() = 0;
   virtual void do_klass(Klass* k) = 0;
   virtual void do_cld(ClassLoaderData* cld) = 0;
-
-#ifdef ASSERT
-  // Default verification of each visited oop field.
-  template <typename T> void verify(T* p);
-
-  // Can be used by subclasses to turn off the default verification of oop fields.
-  virtual bool should_verify_oops() { return true; }
-#endif
 };

 // An OopIterateClosure that can be used when there's no need to visit the Metadata.
@@ -351,7 +343,6 @@
 // a concrete implementation, otherwise a virtual call is taken.
 class Devirtualizer {
  public:
-  template <typename OopClosureType, typename T> static void do_oop_no_verify(OopClosureType* closure, T* p);
   template <typename OopClosureType, typename T> static void do_oop(OopClosureType* closure, T* p);
   template <typename OopClosureType> static void do_klass(OopClosureType* closure, Klass* k);
   template <typename OopClosureType> static void do_cld(OopClosureType* closure, ClassLoaderData* cld);
--- old/src/hotspot/share/memory/iterator.inline.hpp	2020-01-22 10:29:58.655950665 +0100
+++ new/src/hotspot/share/memory/iterator.inline.hpp	2020-01-22 10:29:58.127942024 +0100
@@ -27,7 +27,6 @@
 #include "classfile/classLoaderData.hpp"
 #include "memory/iterator.hpp"
-#include "memory/universe.hpp"
 #include "oops/access.inline.hpp"
 #include "oops/compressedOops.inline.hpp"
 #include "oops/klass.hpp"
@@ -52,22 +51,6 @@
   ClaimMetadataVisitingOopIterateClosure::do_cld(cld);
 }

-#ifdef ASSERT
-// This verification is applied to all visited oops.
-// The closures can turn is off by overriding should_verify_oops().
-template <typename T>
-void OopIterateClosure::verify(T* p) {
-  if (should_verify_oops()) {
-    T heap_oop = RawAccess<>::oop_load(p);
-    if (!CompressedOops::is_null(heap_oop)) {
-      oop o = CompressedOops::decode_not_null(heap_oop);
-      assert(Universe::heap()->is_in(o),
-             "should be in closed *p " PTR_FORMAT " " PTR_FORMAT, p2i(p), p2i(o));
-    }
-  }
-}
-#endif
-
 // Implementation of the non-virtual do_oop dispatch.
 //
 // The same implementation is used for do_metadata, do_klass, and do_cld.
@@ -124,15 +107,8 @@
 }

 template <typename OopClosureType, typename T>
-inline void Devirtualizer::do_oop_no_verify(OopClosureType* closure, T* p) {
-  call_do_oop<T>(&OopClosureType::do_oop, &OopClosure::do_oop, closure, p);
-}
-
-template <typename OopClosureType, typename T>
 inline void Devirtualizer::do_oop(OopClosureType* closure, T* p) {
-  debug_only(closure->verify(p));
-
-  do_oop_no_verify(closure, p);
+  call_do_oop<T>(&OopClosureType::do_oop, &OopClosure::do_oop, closure, p);
 }

 // Implementation of the non-virtual do_metadata dispatch.
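
The shape every converted closure ends up with is sketched below. This is not a file from the patch; ExampleClosure is a hypothetical name, and only the macros and helpers the patch itself touches are assumed. Each closure decodes the oop and asserts in-heap explicitly, instead of relying on the removed OopIterateClosure::verify()/should_verify_oops() hook that Devirtualizer::do_oop used to invoke:

#include "gc/shared/collectedHeap.hpp"   // assert_object_is_in_heap macros added above
#include "memory/iterator.hpp"           // BasicOopIterateClosure
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"

class ExampleClosure : public BasicOopIterateClosure {
  template <class T> void do_oop_work(T* p) {
    T heap_oop = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(heap_oop)) {
      oop obj = CompressedOops::decode_not_null(heap_oop);
      // Explicit per-closure check; the framework no longer does this.
      assert_object_is_in_heap(p, obj);
      // ... closure-specific work on obj ...
    }
  }
 public:
  virtual void do_oop(oop* p)       { do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
};

Both macros compile to nothing in product builds: assert() is empty there, and the debug_only(if (obj != NULL)) guard in assert_object_is_in_heap_or_null likewise vanishes, so the checks cost nothing outside debug builds.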