--- old/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp 2018-05-08 09:40:58.276126527 +0200 +++ new/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp 2018-05-08 09:40:57.984117504 +0200 @@ -290,13 +290,13 @@ if (_ref_processor == NULL) { // Allocate and initialize a reference processor _ref_processor = - new SpanReferenceProcessor(_span, // span - (ParallelGCThreads > 1) && ParallelRefProcEnabled, // mt processing - ParallelGCThreads, // mt processing degree - _cmsGen->refs_discovery_is_mt(), // mt discovery - MAX2(ConcGCThreads, ParallelGCThreads), // mt discovery degree - _cmsGen->refs_discovery_is_atomic(), // discovery is not atomic - &_is_alive_closure); // closure for liveness info + new ReferenceProcessor(&_span_discoverer, // span + (ParallelGCThreads > 1) && ParallelRefProcEnabled, // mt processing + ParallelGCThreads, // mt processing degree + _cmsGen->refs_discovery_is_mt(), // mt discovery + MAX2(ConcGCThreads, ParallelGCThreads), // mt discovery degree + _cmsGen->refs_discovery_is_atomic(), // discovery is not atomic + &_is_alive_closure); // closure for liveness info // Initialize the _ref_processor field of CMSGen _cmsGen->set_ref_processor(_ref_processor); @@ -445,7 +445,10 @@ CardTableRS* ct, ConcurrentMarkSweepPolicy* cp): _cmsGen(cmsGen), + // Adjust span to cover old (cms) gen + _span(cmsGen->reserved()), _ct(ct), + _span_discoverer(_span), _ref_processor(NULL), // will be set later _conc_workers(NULL), // may be set later _abort_preclean(false), @@ -455,8 +458,6 @@ _modUnionTable((CardTable::card_shift - LogHeapWordSize), -1 /* lock-free */, "No_lock" /* dummy */), _modUnionClosurePar(&_modUnionTable), - // Adjust my span to cover old (cms) gen - _span(cmsGen->reserved()), // Construct the is_alive_closure with _span & markBitMap _is_alive_closure(_span, &_markBitMap), _restart_addr(NULL), @@ -3752,7 +3753,7 @@ // Precleaning is currently not MT but the reference processor // may be set for MT. Disable it temporarily here. 
- SpanReferenceProcessor* rp = ref_processor(); + ReferenceProcessor* rp = ref_processor(); ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(rp, false); // Do one pass of scrubbing the discovered reference lists @@ -3760,7 +3761,7 @@ // referents. if (clean_refs) { CMSPrecleanRefsYieldClosure yield_cl(this); - assert(rp->span().equals(_span), "Spans should be equal"); + assert(_span_discoverer.span().equals(_span), "Spans should be equal"); CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap, &_markStack, true /* preclean */); CMSDrainMarkingStackClosure complete_trace(this, @@ -5152,7 +5153,7 @@ WorkGang* workers = heap->workers(); assert(workers != NULL, "Need parallel worker threads."); CMSRefProcTaskProxy rp_task(task, &_collector, - _collector.ref_processor()->span(), + _collector.ref_processor_span(), _collector.markBitMap(), workers, _collector.task_queues()); workers->run_task(&rp_task); @@ -5172,8 +5173,8 @@ ResourceMark rm; HandleMark hm; - SpanReferenceProcessor* rp = ref_processor(); - assert(rp->span().equals(_span), "Spans should be equal"); + ReferenceProcessor* rp = ref_processor(); + assert(_span_discoverer.span().equals(_span), "Spans should be equal"); assert(!rp->enqueuing_is_done(), "Enqueuing should not be complete"); // Process weak references. rp->setup_policy(false); --- old/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.hpp 2018-05-08 09:40:59.556166078 +0200 +++ new/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.hpp 2018-05-08 09:40:59.267157148 +0200 @@ -617,7 +617,7 @@ protected: ConcurrentMarkSweepGeneration* _cmsGen; // Old gen (CMS) - MemRegion _span; // Span covering above two + MemRegion _span; // Span covering above CardTableRS* _ct; // Card table // CMS marking support structures @@ -641,8 +641,9 @@ NOT_PRODUCT(ssize_t _num_par_pushes;) // ("Weak") Reference processing support. 
- SpanReferenceProcessor* _ref_processor; - CMSIsAliveClosure _is_alive_closure; + SpanSubjectToDiscoveryClosure _span_discoverer; + ReferenceProcessor* _ref_processor; + CMSIsAliveClosure _is_alive_closure; // Keep this textually after _markBitMap and _span; c'tor dependency. ConcurrentMarkSweepThread* _cmsThread; // The thread doing the work @@ -841,7 +842,8 @@ ConcurrentMarkSweepPolicy* cp); ConcurrentMarkSweepThread* cmsThread() { return _cmsThread; } - SpanReferenceProcessor* ref_processor() { return _ref_processor; } + MemRegion ref_processor_span() const { return _span_discoverer.span(); } + ReferenceProcessor* ref_processor() { return _ref_processor; } void ref_processor_init(); Mutex* bitMapLock() const { return _markBitMap.lock(); } --- old/src/hotspot/share/gc/cms/parNewGeneration.cpp 2018-05-08 09:41:00.883207083 +0200 +++ new/src/hotspot/share/gc/cms/parNewGeneration.cpp 2018-05-08 09:41:00.580197720 +0200 @@ -1471,14 +1471,15 @@ void ParNewGeneration::ref_processor_init() { if (_ref_processor == NULL) { // Allocate and initialize a reference processor + _span_discoverer.set_span(_reserved); _ref_processor = - new SpanReferenceProcessor(_reserved, // span - ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing - ParallelGCThreads, // mt processing degree - refs_discovery_is_mt(), // mt discovery - ParallelGCThreads, // mt discovery degree - refs_discovery_is_atomic(), // atomic_discovery - NULL); // is_alive_non_header + new ReferenceProcessor(&_span_discoverer, // span + ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing + ParallelGCThreads, // mt processing degree + refs_discovery_is_mt(), // mt discovery + ParallelGCThreads, // mt discovery degree + refs_discovery_is_atomic(), // atomic_discovery + NULL); // is_alive_non_header } } --- old/src/hotspot/share/gc/g1/g1ConcurrentMark.inline.hpp 2018-05-08 09:41:02.127245522 +0200 +++ new/src/hotspot/share/gc/g1/g1ConcurrentMark.inline.hpp 2018-05-08 09:41:01.827236252 
+0200 @@ -39,20 +39,14 @@ #include "utilities/bitMap.inline.hpp" inline bool G1CMIsAliveClosure::do_object_b(oop obj) { - if (obj == NULL) { - return false; - } - assert(_g1h->is_in_reserved(obj), "Asked for liveness of oop " PTR_FORMAT " outside of reserved heap.", p2i(obj)); - // Young regions have nTAMS == bottom(), i.e. all objects there are implicitly live, - // so we do not need to explicitly check for region type. - bool result = !_g1h->is_obj_ill(obj, _g1h->heap_region_containing(obj)); - assert(_g1h->heap_region_containing(obj)->is_old_or_humongous() || result, - "Oop " PTR_FORMAT " in young region %u (%s) should be live", - p2i(obj), _g1h->addr_to_region((HeapWord*)obj), _g1h->heap_region_containing(obj)->get_short_type_str()); - return result; + return !_g1h->is_obj_ill(obj); } inline bool G1CMSubjectToDiscoveryClosure::do_object_b(oop obj) { + // Re-check whether the passed object is null. With ReferentBasedDiscovery the + // mutator may have changed the referent's value (i.e. cleared it) between the + // time the referent was determined to be potentially alive and calling this + // method. 
if (obj == NULL) { return false; } --- old/src/hotspot/share/gc/parallel/asPSYoungGen.cpp 2018-05-08 09:41:03.287281366 +0200 +++ new/src/hotspot/share/gc/parallel/asPSYoungGen.cpp 2018-05-08 09:41:03.001272529 +0200 @@ -496,7 +496,7 @@ _reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(), (HeapWord*)virtual_space()->high_boundary()); - PSScavenge::reference_processor()->set_span(_reserved); + PSScavenge::set_subject_to_discovery_span(_reserved); HeapWord* new_eden_bottom = (HeapWord*)virtual_space()->low(); HeapWord* eden_bottom = eden_space()->bottom(); --- old/src/hotspot/share/gc/parallel/psMarkSweep.cpp 2018-05-08 09:41:04.513319249 +0200 +++ new/src/hotspot/share/gc/parallel/psMarkSweep.cpp 2018-05-08 09:41:04.227310412 +0200 @@ -65,9 +65,11 @@ jlong PSMarkSweep::_time_of_last_gc = 0; CollectorCounters* PSMarkSweep::_counters = NULL; +SpanSubjectToDiscoveryClosure PSMarkSweep::_span_discoverer; + void PSMarkSweep::initialize() { - MemRegion mr = ParallelScavengeHeap::heap()->reserved_region(); - set_ref_processor(new SpanReferenceProcessor(mr)); // a vanilla ref proc + _span_discoverer.set_span(ParallelScavengeHeap::heap()->reserved_region()); + set_ref_processor(new ReferenceProcessor(&_span_discoverer)); // a vanilla ref proc _counters = new CollectorCounters("PSMarkSweep", 1); } --- old/src/hotspot/share/gc/parallel/psMarkSweep.hpp 2018-05-08 09:41:05.682355371 +0200 +++ new/src/hotspot/share/gc/parallel/psMarkSweep.hpp 2018-05-08 09:41:05.394346472 +0200 @@ -27,6 +27,7 @@ #include "gc/serial/markSweep.hpp" #include "gc/shared/collectorCounters.hpp" +#include "gc/shared/referenceProcessor.hpp" #include "utilities/stack.hpp" class PSAdaptiveSizePolicy; @@ -39,6 +40,8 @@ static jlong _time_of_last_gc; // ms static CollectorCounters* _counters; + static SpanSubjectToDiscoveryClosure _span_discoverer; + // Closure accessors static OopClosure* mark_and_push_closure() { return &MarkSweep::mark_and_push_closure; } static VoidClosure* 
follow_stack_closure() { return &MarkSweep::follow_stack_closure; } --- old/src/hotspot/share/gc/parallel/psParallelCompact.cpp 2018-05-08 09:41:06.927393842 +0200 +++ new/src/hotspot/share/gc/parallel/psParallelCompact.cpp 2018-05-08 09:41:06.639384943 +0200 @@ -117,6 +117,7 @@ SpaceInfo PSParallelCompact::_space_info[PSParallelCompact::last_space_id]; +SpanSubjectToDiscoveryClosure PSParallelCompact::_span_discoverer; ReferenceProcessor* PSParallelCompact::_ref_processor = NULL; double PSParallelCompact::_dwl_mean; @@ -843,15 +844,15 @@ void PSParallelCompact::post_initialize() { ParallelScavengeHeap* heap = ParallelScavengeHeap::heap(); - MemRegion mr = heap->reserved_region(); + _span_discoverer.set_span(heap->reserved_region()); _ref_processor = - new SpanReferenceProcessor(mr, // span - ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing - ParallelGCThreads, // mt processing degree - true, // mt discovery - ParallelGCThreads, // mt discovery degree - true, // atomic_discovery - &_is_alive_closure); // non-header is alive closure + new ReferenceProcessor(&_span_discoverer, // span + ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing + ParallelGCThreads, // mt processing degree + true, // mt discovery + ParallelGCThreads, // mt discovery degree + true, // atomic_discovery + &_is_alive_closure); // non-header is alive closure _counters = new CollectorCounters("PSParallelCompact", 1); // Initialize static fields in ParCompactionManager. --- old/src/hotspot/share/gc/parallel/psParallelCompact.hpp 2018-05-08 09:41:08.133431107 +0200 +++ new/src/hotspot/share/gc/parallel/psParallelCompact.hpp 2018-05-08 09:41:07.841422084 +0200 @@ -968,6 +968,7 @@ static SpaceInfo _space_info[last_space_id]; // Reference processing (used in ...follow_contents) + static SpanSubjectToDiscoveryClosure _span_discoverer; static ReferenceProcessor* _ref_processor; // Values computed at initialization and used by dead_wood_limiter(). 
--- old/src/hotspot/share/gc/parallel/psScavenge.cpp 2018-05-08 09:41:09.328468033 +0200 +++ new/src/hotspot/share/gc/parallel/psScavenge.cpp 2018-05-08 09:41:09.036459010 +0200 @@ -58,18 +58,19 @@ #include "services/memoryService.hpp" #include "utilities/stack.inline.hpp" -HeapWord* PSScavenge::_to_space_top_before_gc = NULL; -int PSScavenge::_consecutive_skipped_scavenges = 0; -SpanReferenceProcessor* PSScavenge::_ref_processor = NULL; -PSCardTable* PSScavenge::_card_table = NULL; -bool PSScavenge::_survivor_overflow = false; -uint PSScavenge::_tenuring_threshold = 0; -HeapWord* PSScavenge::_young_generation_boundary = NULL; -uintptr_t PSScavenge::_young_generation_boundary_compressed = 0; -elapsedTimer PSScavenge::_accumulated_time; -STWGCTimer PSScavenge::_gc_timer; -ParallelScavengeTracer PSScavenge::_gc_tracer; -CollectorCounters* PSScavenge::_counters = NULL; +HeapWord* PSScavenge::_to_space_top_before_gc = NULL; +int PSScavenge::_consecutive_skipped_scavenges = 0; +SpanSubjectToDiscoveryClosure PSScavenge::_span_discoverer; +ReferenceProcessor* PSScavenge::_ref_processor = NULL; +PSCardTable* PSScavenge::_card_table = NULL; +bool PSScavenge::_survivor_overflow = false; +uint PSScavenge::_tenuring_threshold = 0; +HeapWord* PSScavenge::_young_generation_boundary = NULL; +uintptr_t PSScavenge::_young_generation_boundary_compressed = 0; +elapsedTimer PSScavenge::_accumulated_time; +STWGCTimer PSScavenge::_gc_timer; +ParallelScavengeTracer PSScavenge::_gc_tracer; +CollectorCounters* PSScavenge::_counters = NULL; // Define before use class PSIsAliveClosure: public BoolObjectClosure { @@ -766,16 +767,15 @@ set_young_generation_boundary(young_gen->eden_space()->bottom()); // Initialize ref handling object for scavenging. 
- MemRegion mr = young_gen->reserved(); - + _span_discoverer.set_span(young_gen->reserved()); _ref_processor = - new SpanReferenceProcessor(mr, // span - ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing - ParallelGCThreads, // mt processing degree - true, // mt discovery - ParallelGCThreads, // mt discovery degree - true, // atomic_discovery - NULL); // header provides liveness info + new ReferenceProcessor(&_span_discoverer, // span + ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing + ParallelGCThreads, // mt processing degree + true, // mt discovery + ParallelGCThreads, // mt discovery degree + true, // atomic_discovery + NULL); // header provides liveness info // Cache the cardtable _card_table = heap->card_table(); --- old/src/hotspot/share/gc/parallel/psScavenge.hpp 2018-05-08 09:41:10.526505051 +0200 +++ new/src/hotspot/share/gc/parallel/psScavenge.hpp 2018-05-08 09:41:10.224495719 +0200 @@ -65,14 +65,15 @@ protected: // Flags/counters - static SpanReferenceProcessor* _ref_processor; // Reference processor for scavenging. - static PSIsAliveClosure _is_alive_closure; // Closure used for reference processing - static PSCardTable* _card_table; // We cache the card table for fast access. - static bool _survivor_overflow; // Overflow this collection - static uint _tenuring_threshold; // tenuring threshold for next scavenge - static elapsedTimer _accumulated_time; // total time spent on scavenge - static STWGCTimer _gc_timer; // GC time book keeper - static ParallelScavengeTracer _gc_tracer; // GC tracing + static SpanSubjectToDiscoveryClosure _span_discoverer; + static ReferenceProcessor* _ref_processor; // Reference processor for scavenging. + static PSIsAliveClosure _is_alive_closure; // Closure used for reference processing + static PSCardTable* _card_table; // We cache the card table for fast access. 
+ static bool _survivor_overflow; // Overflow this collection + static uint _tenuring_threshold; // tenuring threshold for next scavenge + static elapsedTimer _accumulated_time; // total time spent on scavenge + static STWGCTimer _gc_timer; // GC time book keeper + static ParallelScavengeTracer _gc_tracer; // GC tracing // The lowest address possible for the young_gen. // This is used to decide if an oop should be scavenged, // cards should be marked, etc. @@ -102,8 +103,11 @@ // Performance Counters static CollectorCounters* counters() { return _counters; } + static void set_subject_to_discovery_span(MemRegion mr) { + _span_discoverer.set_span(mr); + } // Used by scavenge_contents && psMarkSweep - static SpanReferenceProcessor* const reference_processor() { + static ReferenceProcessor* const reference_processor() { assert(_ref_processor != NULL, "Sanity"); return _ref_processor; } --- old/src/hotspot/share/gc/parallel/psYoungGen.cpp 2018-05-08 09:41:11.777543706 +0200 +++ new/src/hotspot/share/gc/parallel/psYoungGen.cpp 2018-05-08 09:41:11.488534776 +0200 @@ -839,7 +839,7 @@ void PSYoungGen::reset_survivors_after_shrink() { _reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(), (HeapWord*)virtual_space()->high_boundary()); - PSScavenge::reference_processor()->set_span(_reserved); + PSScavenge::set_subject_to_discovery_span(_reserved); MutableSpace* space_shrinking = NULL; if (from_space()->end() > to_space()->end()) { --- old/src/hotspot/share/gc/shared/generation.cpp 2018-05-08 09:41:14.051613973 +0200 +++ new/src/hotspot/share/gc/shared/generation.cpp 2018-05-08 09:41:13.760604981 +0200 @@ -77,7 +77,8 @@ void Generation::ref_processor_init() { assert(_ref_processor == NULL, "a reference processor already exists"); assert(!_reserved.is_empty(), "empty generation?"); - _ref_processor = new SpanReferenceProcessor(_reserved); // a vanilla reference processor + _span_discoverer.set_span(_reserved); + _ref_processor = new 
ReferenceProcessor(&_span_discoverer); // a vanilla reference processor if (_ref_processor == NULL) { vm_exit_during_initialization("Could not allocate ReferenceProcessor object"); } --- old/src/hotspot/share/gc/shared/generation.hpp 2018-05-08 09:41:15.233650497 +0200 +++ new/src/hotspot/share/gc/shared/generation.hpp 2018-05-08 09:41:14.940641443 +0200 @@ -100,7 +100,8 @@ VirtualSpace _virtual_space; // ("Weak") Reference processing support - SpanReferenceProcessor* _ref_processor; + SpanSubjectToDiscoveryClosure _span_discoverer; + ReferenceProcessor* _ref_processor; // Performance Counters CollectorCounters* _gc_counters; @@ -139,7 +140,7 @@ // allocate and initialize ("weak") refs processing support virtual void ref_processor_init(); - void set_ref_processor(SpanReferenceProcessor* rp) { + void set_ref_processor(ReferenceProcessor* rp) { assert(_ref_processor == NULL, "clobbering existing _ref_processor"); _ref_processor = rp; } @@ -484,7 +485,7 @@ virtual const char* short_name() const = 0; // Reference Processing accessor - SpanReferenceProcessor* const ref_processor() { return _ref_processor; } + ReferenceProcessor* const ref_processor() { return _ref_processor; } // Iteration. 
--- old/src/hotspot/share/gc/shared/referenceProcessor.cpp 2018-05-08 09:41:16.422687237 +0200 +++ new/src/hotspot/share/gc/shared/referenceProcessor.cpp 2018-05-08 09:41:16.127678121 +0200 @@ -132,24 +132,6 @@ setup_policy(false /* default soft ref policy */); } -SpanReferenceProcessor::SpanReferenceProcessor(MemRegion span, - bool mt_processing, - uint mt_processing_degree, - bool mt_discovery, - uint mt_discovery_degree, - bool atomic_discovery, - BoolObjectClosure* is_alive_non_header) : - ReferenceProcessor(&_span_based_discoverer, - mt_processing, - mt_processing_degree, - mt_discovery, - mt_discovery_degree, - atomic_discovery, - is_alive_non_header), - _span_based_discoverer(span) { - -} - #ifndef PRODUCT void ReferenceProcessor::verify_no_references_recorded() { guarantee(!_discovering_refs, "Discovering refs?"); @@ -974,8 +956,7 @@ } #endif -template <typename T> -bool ReferenceProcessor::is_subject_to_discovery(T const obj) const { +bool ReferenceProcessor::is_subject_to_discovery(oop const obj) const { return _is_subject_to_discovery->do_object_b(obj); } --- old/src/hotspot/share/gc/shared/referenceProcessor.hpp 2018-05-08 09:41:17.610723946 +0200 +++ new/src/hotspot/share/gc/shared/referenceProcessor.hpp 2018-05-08 09:41:17.315714830 +0200 @@ -329,8 +329,8 @@ // Update (advance) the soft ref master clock field. void update_soft_ref_master_clock(); - template <typename T> - bool is_subject_to_discovery(T const obj) const; + bool is_subject_to_discovery(oop const obj) const; + public: // Default parameters give you a vanilla reference processor. ReferenceProcessor(BoolObjectClosure* is_subject_to_discovery, @@ -419,31 +419,24 @@ void verify_referent(oop obj) PRODUCT_RETURN; }; -// A reference processor that uses a single memory span to determine the area that +// A subject-to-discovery closure that uses a single memory span to determine the area that // is subject to discovery. Useful for collectors which have contiguous generations. 
-class SpanReferenceProcessor : public ReferenceProcessor { - class SpanBasedDiscoverer : public BoolObjectClosure { - public: - MemRegion _span; +class SpanSubjectToDiscoveryClosure : public BoolObjectClosure { + MemRegion _span; - SpanBasedDiscoverer(MemRegion span) : BoolObjectClosure(), _span(span) { } +public: + SpanSubjectToDiscoveryClosure() : BoolObjectClosure(), _span() { } + SpanSubjectToDiscoveryClosure(MemRegion span) : BoolObjectClosure(), _span(span) { } - virtual bool do_object_b(oop obj) { - return _span.contains(obj); - } - }; + MemRegion span() const { return _span; } - SpanBasedDiscoverer _span_based_discoverer; -public: - SpanReferenceProcessor(MemRegion span, - bool mt_processing = false, uint mt_processing_degree = 1, - bool mt_discovery = false, uint mt_discovery_degree = 1, - bool atomic_discovery = true, - BoolObjectClosure* is_alive_non_header = NULL); - - // get and set span - MemRegion span() { return _span_based_discoverer._span; } - void set_span(MemRegion span) { _span_based_discoverer._span = span; } + void set_span(MemRegion mr) { + _span = mr; + } + + virtual bool do_object_b(oop obj) { + return _span.contains(obj); + } }; // A utility class to disable reference discovery in @@ -470,11 +463,10 @@ // A utility class to temporarily mutate the subject discovery closure of the // given ReferenceProcessor in the scope that contains it. class ReferenceProcessorSubjectToDiscoveryMutator : StackObj { - private: ReferenceProcessor* _rp; BoolObjectClosure* _saved_cl; - public: +public: ReferenceProcessorSubjectToDiscoveryMutator(ReferenceProcessor* rp, BoolObjectClosure* cl): _rp(rp) { _saved_cl = _rp->is_subject_to_discovery_closure(); @@ -488,21 +480,21 @@ // A utility class to temporarily mutate the span of the // given ReferenceProcessor in the scope that contains it. 
-class ReferenceProcessorSpanMutator: StackObj { - private: - SpanReferenceProcessor* _rp; - MemRegion _saved_span; +class ReferenceProcessorSpanMutator : StackObj { + ReferenceProcessor* _rp; + SpanSubjectToDiscoveryClosure _discoverer; + BoolObjectClosure* _old_discoverer; - public: - ReferenceProcessorSpanMutator(SpanReferenceProcessor* rp, +public: + ReferenceProcessorSpanMutator(ReferenceProcessor* rp, MemRegion span): - _rp(rp) { - _saved_span = _rp->span(); - _rp->set_span(span); + _rp(rp), _discoverer(span) { + _old_discoverer = rp->is_subject_to_discovery_closure(); + rp->set_is_subject_to_discovery_closure(&_discoverer); } ~ReferenceProcessorSpanMutator() { - _rp->set_span(_saved_span); + _rp->set_is_subject_to_discovery_closure(_old_discoverer); } };