
src/hotspot/share/gc/shared/referenceProcessor.hpp

rev 49912 : imported patch 8201492-properly-implement-non-contiguous-reference-processing
rev 49913 : imported patch 8201492-stefanj-review
rev 49914 : [mq]: 8201492-kim-review

*** 36,57 ****
  // ReferenceProcessor class encapsulates the per-"collector" processing
  // of java.lang.Reference objects for GC. The interface is useful for supporting
  // a generational abstraction, in particular when there are multiple
  // generations that are being independently collected -- possibly
! // concurrently and/or incrementally. Note, however, that the
  // ReferenceProcessor class abstracts away from a generational setting
! // by using only a heap interval (called "span" below), thus allowing
! // its use in a straightforward manner in a general, non-generational
! // setting.
  //
- // The basic idea is that each ReferenceProcessor object concerns
- // itself with ("weak") reference processing in a specific "span"
- // of the heap of interest to a specific collector. Currently,
- // the span is a convex interval of the heap, but, efficiency
- // apart, there seems to be no reason it couldn't be extended
- // (with appropriate modifications) to any "non-convex interval".
  //
  // forward references
  class ReferencePolicy;
  class AbstractRefProcTaskExecutor;
--- 36,52 ----
  // ReferenceProcessor class encapsulates the per-"collector" processing
  // of java.lang.Reference objects for GC. The interface is useful for supporting
  // a generational abstraction, in particular when there are multiple
  // generations that are being independently collected -- possibly
! // concurrently and/or incrementally.
  // ReferenceProcessor class abstracts away from a generational setting
! // by using a closure that determines whether a given reference or referent are
! // subject to this ReferenceProcessor's discovery, thus allowing its use in a
! // straightforward manner in a general, non-generational, non-contiguous generation
! // (or heap) setting.
  //
  // forward references
  class ReferencePolicy;
  class AbstractRefProcTaskExecutor;
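
As a rough illustration of the new contract described in the comment above, a collector with a non-contiguous set of regions could implement the discovery predicate as a BoolObjectClosure along the following lines. This is only a sketch and not part of this webrev: ExampleHeap, region_containing() and in_collection_set() are made-up names standing in for whatever a particular collector provides; only BoolObjectClosure::do_object_b() and oop come from the existing HotSpot interfaces.

  // Sketch only: a non-contiguous collector answers discovery queries per region.
  // ExampleHeap, region_containing() and in_collection_set() are hypothetical.
  class ExampleRegionSubjectToDiscoveryClosure : public BoolObjectClosure {
    ExampleHeap* _heap;
   public:
    ExampleRegionSubjectToDiscoveryClosure(ExampleHeap* heap) : _heap(heap) { }

    virtual bool do_object_b(oop obj) {
      // Subject to discovery iff the region holding obj is being collected,
      // independent of where that region lies in the address space.
      return _heap->region_containing(obj)->in_collection_set();
    }
  };
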
*** 166,185 ****
      NOT_PRODUCT(_processed++);
    }
  };
  
  class ReferenceProcessor : public ReferenceDiscoverer {
- 
-  private:
    size_t total_count(DiscoveredList lists[]) const;
  
-  protected:
    // The SoftReference master timestamp clock
    static jlong _soft_ref_timestamp_clock;
  
!   MemRegion   _span;                    // (right-open) interval of heap
!                                         // subject to wkref discovery
    bool        _discovering_refs;        // true when discovery enabled
    bool        _discovery_is_atomic;     // if discovery is atomic wrt
                                          // other collectors in configuration
    bool        _discovery_is_mt;         // true if reference discovery is MT.
--- 161,178 ----
      NOT_PRODUCT(_processed++);
    }
  };
  
  class ReferenceProcessor : public ReferenceDiscoverer {
    size_t total_count(DiscoveredList lists[]) const;
  
    // The SoftReference master timestamp clock
    static jlong _soft_ref_timestamp_clock;
  
!   BoolObjectClosure* _is_subject_to_discovery; // determines whether a given oop is subject
!                                                // to this ReferenceProcessor's discovery
!                                                // (and further processing).
    bool        _discovering_refs;        // true when discovery enabled
    bool        _discovery_is_atomic;     // if discovery is atomic wrt
                                          // other collectors in configuration
    bool        _discovery_is_mt;         // true if reference discovery is MT.
*** 255,277 ****
                              BoolObjectClosure*           is_alive,
                              OopClosure*                  keep_alive,
                              VoidClosure*                 complete_gc);
    // Phase2: remove all those references whose referents are
    // reachable.
!   inline void process_phase2(DiscoveredList&    refs_list,
                               BoolObjectClosure* is_alive,
                               OopClosure*        keep_alive,
!                              VoidClosure*       complete_gc) {
!     if (discovery_is_atomic()) {
!       // complete_gc is ignored in this case for this phase
!       pp2_work(refs_list, is_alive, keep_alive);
!     } else {
!       assert(complete_gc != NULL, "Error");
!       pp2_work_concurrent_discovery(refs_list, is_alive,
!                                     keep_alive, complete_gc);
!     }
!   }
    // Work methods in support of process_phase2
    void pp2_work(DiscoveredList&    refs_list,
                  BoolObjectClosure* is_alive,
                  OopClosure*        keep_alive);
    void pp2_work_concurrent_discovery(
--- 248,261 ----
                              BoolObjectClosure*           is_alive,
                              OopClosure*                  keep_alive,
                              VoidClosure*                 complete_gc);
    // Phase2: remove all those references whose referents are
    // reachable.
!   void process_phase2(DiscoveredList&    refs_list,
                        BoolObjectClosure* is_alive,
                        OopClosure*        keep_alive,
!                       VoidClosure*       complete_gc);
    // Work methods in support of process_phase2
    void pp2_work(DiscoveredList&    refs_list,
                  BoolObjectClosure* is_alive,
                  OopClosure*        keep_alive);
    void pp2_work_concurrent_discovery(
*** 310,329 ****
    const char* list_name(uint i);
  
    void enqueue_discovered_reflists(AbstractRefProcTaskExecutor* task_executor,
                                     ReferenceProcessorPhaseTimes* phase_times);
  
- protected:
    // "Preclean" the given discovered reference list
    // by removing references with strongly reachable referents.
    // Currently used in support of CMS only.
    void preclean_discovered_reflist(DiscoveredList&    refs_list,
                                     BoolObjectClosure* is_alive,
                                     OopClosure*        keep_alive,
                                     VoidClosure*       complete_gc,
                                     YieldClosure*      yield);
! 
    // round-robin mod _num_q (not: _not_ mode _max_num_q)
    uint next_id() {
      uint id = _next_id;
      assert(!_discovery_is_mt, "Round robin should only be used in serial discovery");
      if (++_next_id == _num_q) {
--- 294,312 ----
    const char* list_name(uint i);
  
    void enqueue_discovered_reflists(AbstractRefProcTaskExecutor* task_executor,
                                     ReferenceProcessorPhaseTimes* phase_times);
  
    // "Preclean" the given discovered reference list
    // by removing references with strongly reachable referents.
    // Currently used in support of CMS only.
    void preclean_discovered_reflist(DiscoveredList&    refs_list,
                                     BoolObjectClosure* is_alive,
                                     OopClosure*        keep_alive,
                                     VoidClosure*       complete_gc,
                                     YieldClosure*      yield);
! private:
    // round-robin mod _num_q (not: _not_ mode _max_num_q)
    uint next_id() {
      uint id = _next_id;
      assert(!_discovery_is_mt, "Round robin should only be used in serial discovery");
      if (++_next_id == _num_q) {
*** 344,356 ****
    void balance_queues(DiscoveredList ref_lists[]);
  
    // Update (advance) the soft ref master clock field.
    void update_soft_ref_master_clock();
  
!  public:
    // Default parameters give you a vanilla reference processor.
!   ReferenceProcessor(MemRegion span,
                       bool mt_processing = false, uint mt_processing_degree = 1,
                       bool mt_discovery  = false, uint mt_discovery_degree  = 1,
                       bool atomic_discovery = true,
                       BoolObjectClosure* is_alive_non_header = NULL);
--- 327,341 ----
    void balance_queues(DiscoveredList ref_lists[]);
  
    // Update (advance) the soft ref master clock field.
    void update_soft_ref_master_clock();
  
!   bool is_subject_to_discovery(oop const obj) const;
! 
!  public:
    // Default parameters give you a vanilla reference processor.
!   ReferenceProcessor(BoolObjectClosure* is_subject_to_discovery,
                       bool mt_processing = false, uint mt_processing_degree = 1,
                       bool mt_discovery  = false, uint mt_discovery_degree  = 1,
                       bool atomic_discovery = true,
                       BoolObjectClosure* is_alive_non_header = NULL);
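
The new private helper is_subject_to_discovery(oop) declared above presumably just forwards to the installed closure. A minimal sketch of such a definition follows; the actual body lives outside this header and is not shown in this webrev.

  // Sketch of the expected delegation; the real definition is not part of this file.
  bool ReferenceProcessor::is_subject_to_discovery(oop const obj) const {
    return _is_subject_to_discovery->do_object_b(obj);
  }
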
*** 371,383 ****
    }
  
    void set_is_alive_non_header(BoolObjectClosure* is_alive_non_header) {
      _is_alive_non_header = is_alive_non_header;
    }
  
!   // get and set span
!   MemRegion span()                   { return _span; }
!   void      set_span(MemRegion span) { _span = span; }
    // start and stop weak ref discovery
    void enable_discovery(bool check_no_refs = true);
    void disable_discovery()  { _discovering_refs = false; }
    bool discovery_enabled()  { return _discovering_refs;  }
--- 356,367 ----
    }
  
    void set_is_alive_non_header(BoolObjectClosure* is_alive_non_header) {
      _is_alive_non_header = is_alive_non_header;
    }
  
!   BoolObjectClosure* is_subject_to_discovery_closure() const { return _is_subject_to_discovery; }
!   void set_is_subject_to_discovery_closure(BoolObjectClosure* cl) { _is_subject_to_discovery = cl; }
    // start and stop weak ref discovery
    void enable_discovery(bool check_no_refs = true);
    void disable_discovery()  { _discovering_refs = false; }
    bool discovery_enabled()  { return _discovering_refs;  }
*** 433,442 ****
--- 417,446 ----
    // debugging
    void verify_no_references_recorded() PRODUCT_RETURN;
    void verify_referent(oop obj)        PRODUCT_RETURN;
  };
  
+ // A subject-to-discovery closure that uses a single memory span to determine the area that
+ // is subject to discovery. Useful for collectors which have contiguous generations.
+ class SpanSubjectToDiscoveryClosure : public BoolObjectClosure {
+   MemRegion _span;
+ 
+ public:
+   SpanSubjectToDiscoveryClosure() : BoolObjectClosure(), _span() { }
+   SpanSubjectToDiscoveryClosure(MemRegion span) : BoolObjectClosure(), _span(span) { }
+ 
+   MemRegion span() const { return _span; }
+ 
+   void set_span(MemRegion mr) {
+     _span = mr;
+   }
+ 
+   virtual bool do_object_b(oop obj) {
+     return _span.contains(obj);
+   }
+ };
+ 
  // A utility class to disable reference discovery in
  // the scope which contains it, for given ReferenceProcessor.
  class NoRefDiscovery: StackObj {
   private:
    ReferenceProcessor* _rp;
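
For a collector whose reference-processing area is still a single contiguous span, the new SpanSubjectToDiscoveryClosure added above slots directly into the updated ReferenceProcessor constructor. The following is a hedged wiring sketch, not taken from this patch: the field name, the generation bounds and the flag-derived arguments are illustrative, while the constructor parameter order matches the declaration shown earlier in this diff.

  // Illustrative wiring only; _span_based_discoverer, gen_start and gen_end are assumptions.
  SpanSubjectToDiscoveryClosure _span_based_discoverer;
  _span_based_discoverer.set_span(MemRegion(gen_start, gen_end));   // assumed generation bounds

  ReferenceProcessor* rp =
    new ReferenceProcessor(&_span_based_discoverer,   // subject-to-discovery closure (replaces the old span argument)
                           ParallelRefProcEnabled && (ParallelGCThreads > 1),  // mt processing
                           ParallelGCThreads,         // degree of mt processing
                           false,                     // mt discovery
                           1,                         // degree of mt discovery
                           true,                      // atomic discovery
                           NULL);                     // is_alive_non_header
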
*** 454,481 ****
      _rp->enable_discovery(false /*check_no_refs*/);
    }
  }
  };
  
  // A utility class to temporarily mutate the span of the
  // given ReferenceProcessor in the scope that contains it.
! class ReferenceProcessorSpanMutator: StackObj {
!  private:
    ReferenceProcessor* _rp;
!   MemRegion           _saved_span;
  
!  public:
    ReferenceProcessorSpanMutator(ReferenceProcessor* rp,
                                  MemRegion span):
!     _rp(rp) {
!     _saved_span = _rp->span();
!     _rp->set_span(span);
    }
  
    ~ReferenceProcessorSpanMutator() {
!     _rp->set_span(_saved_span);
    }
  };
  
  // A utility class to temporarily change the MT'ness of
  // reference discovery for the given ReferenceProcessor
--- 458,504 ----
      _rp->enable_discovery(false /*check_no_refs*/);
    }
  }
  };
  
+ // A utility class to temporarily mutate the subject discovery closure of the
+ // given ReferenceProcessor in the scope that contains it.
+ class ReferenceProcessorSubjectToDiscoveryMutator : StackObj {
+   ReferenceProcessor* _rp;
+   BoolObjectClosure* _saved_cl;
+ 
+ public:
+   ReferenceProcessorSubjectToDiscoveryMutator(ReferenceProcessor* rp, BoolObjectClosure* cl):
+     _rp(rp) {
+     _saved_cl = _rp->is_subject_to_discovery_closure();
+     _rp->set_is_subject_to_discovery_closure(cl);
+   }
+ 
+   ~ReferenceProcessorSubjectToDiscoveryMutator() {
+     _rp->set_is_subject_to_discovery_closure(_saved_cl);
+   }
+ };
+ 
  // A utility class to temporarily mutate the span of the
  // given ReferenceProcessor in the scope that contains it.
! class ReferenceProcessorSpanMutator : StackObj {
    ReferenceProcessor* _rp;
!   SpanSubjectToDiscoveryClosure _discoverer;
!   BoolObjectClosure* _old_discoverer;
  
!  public:
    ReferenceProcessorSpanMutator(ReferenceProcessor* rp,
                                  MemRegion span):
!     _rp(rp),
!     _discoverer(span),
!     _old_discoverer(rp->is_subject_to_discovery_closure()) {
! 
!     rp->set_is_subject_to_discovery_closure(&_discoverer);
    }
  
    ~ReferenceProcessorSpanMutator() {
!     _rp->set_is_subject_to_discovery_closure(_old_discoverer);
    }
  };
  
  // A utility class to temporarily change the MT'ness of
  // reference discovery for the given ReferenceProcessor
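
Both mutators shown above follow the usual HotSpot scoped StackObj pattern: the constructor installs a replacement subject-to-discovery closure and the destructor restores the previous one when the scope ends. A usage sketch follows; ref_processor(), young_start and young_end are stand-ins for collector-specific code and not part of this webrev.

  {
    // Restrict discovery to a single young-generation span for this collection only.
    ReferenceProcessorSpanMutator rp_mut(ref_processor(),                    // assumed accessor
                                         MemRegion(young_start, young_end)); // assumed bounds
    // ... perform the collection; discovery consults the temporary span closure ...
  } // destructor reinstalls the previous subject-to-discovery closure
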
*** 496,506 ****
  
    ~ReferenceProcessorMTDiscoveryMutator() {
      _rp->set_mt_discovery(_saved_mt);
    }
  };
- 
  // A utility class to temporarily change the disposition
  // of the "is_alive_non_header" closure field of the
  // given ReferenceProcessor in the scope that contains it.
  class ReferenceProcessorIsAliveMutator: StackObj {
   private:
--- 519,528 ----
  
    ~ReferenceProcessorMTDiscoveryMutator() {
      _rp->set_mt_discovery(_saved_mt);
    }
  };
  // A utility class to temporarily change the disposition
  // of the "is_alive_non_header" closure field of the
  // given ReferenceProcessor in the scope that contains it.
  class ReferenceProcessorIsAliveMutator: StackObj {
   private: