hotspot/src/share/vm/memory/referenceProcessor.hpp

Print this page
rev 611 : Merge

*** 1,10 **** #ifdef USE_PRAGMA_IDENT_HDR #pragma ident "@(#)referenceProcessor.hpp 1.43 07/05/05 17:05:54 JVM" #endif /* ! * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. --- 1,10 ---- #ifdef USE_PRAGMA_IDENT_HDR #pragma ident "@(#)referenceProcessor.hpp 1.43 07/05/05 17:05:54 JVM" #endif /* ! * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation.
*** 24,34 **** * have any questions. * */ // ReferenceProcessor class encapsulates the per-"collector" processing ! // of "weak" references for GC. The interface is useful for supporting // a generational abstraction, in particular when there are multiple // generations that are being independently collected -- possibly // concurrently and/or incrementally. Note, however, that the // ReferenceProcessor class abstracts away from a generational setting // by using only a heap interval (called "span" below), thus allowing --- 24,34 ---- * have any questions. * */ // ReferenceProcessor class encapsulates the per-"collector" processing ! // of java.lang.Reference objects for GC. The interface is useful for supporting // a generational abstraction, in particular when there are multiple // generations that are being independently collected -- possibly // concurrently and/or incrementally. Note, however, that the // ReferenceProcessor class abstracts away from a generational setting // by using only a heap interval (called "span" below), thus allowing
*** 46,66 **** class ReferencePolicy; class AbstractRefProcTaskExecutor; class DiscoveredList; class ReferenceProcessor : public CHeapObj { - friend class DiscoveredList; - friend class DiscoveredListIterator; protected: // End of list marker static oop _sentinelRef; MemRegion _span; // (right-open) interval of heap // subject to wkref discovery bool _discovering_refs; // true when discovery enabled bool _discovery_is_atomic; // if discovery is atomic wrt // other collectors in configuration bool _discovery_is_mt; // true if reference discovery is MT. bool _enqueuing_is_done; // true if all weak references enqueued bool _processing_is_mt; // true during phases when // reference processing is MT. int _next_id; // round-robin counter in // support of work distribution --- 46,72 ---- class ReferencePolicy; class AbstractRefProcTaskExecutor; class DiscoveredList; class ReferenceProcessor : public CHeapObj { protected: // End of list marker static oop _sentinelRef; MemRegion _span; // (right-open) interval of heap // subject to wkref discovery bool _discovering_refs; // true when discovery enabled bool _discovery_is_atomic; // if discovery is atomic wrt // other collectors in configuration bool _discovery_is_mt; // true if reference discovery is MT. + // If true, setting "next" field of a discovered refs list requires + // write barrier(s). (Must be true if used in a collector in which + // elements of a discovered list may be moved during discovery: for + // example, a collector like Garbage-First that moves objects during a + // long-term concurrent marking phase that does weak reference + // discovery.) + bool _discovered_list_needs_barrier; + BarrierSet* _bs; // Cached copy of BarrierSet. bool _enqueuing_is_done; // true if all weak references enqueued bool _processing_is_mt; // true during phases when // reference processing is MT. int _next_id; // round-robin counter in // support of work distribution
*** 70,90 **** // helps the reference processor determine the reachability // of an oop (the field is currently initialized to NULL for // all collectors but the CMS collector). BoolObjectClosure* _is_alive_non_header; // The discovered ref lists themselves ! int _num_q; // the MT'ness degree of the queues below ! DiscoveredList* _discoveredSoftRefs; // pointer to array of oops DiscoveredList* _discoveredWeakRefs; DiscoveredList* _discoveredFinalRefs; DiscoveredList* _discoveredPhantomRefs; public: int num_q() { return _num_q; } DiscoveredList* discovered_soft_refs() { return _discoveredSoftRefs; } ! static oop* sentinel_ref() { return &_sentinelRef; } public: // Process references with a certain reachability level. void process_discovered_reflist(DiscoveredList refs_lists[], ReferencePolicy* policy, --- 76,114 ---- // helps the reference processor determine the reachability // of an oop (the field is currently initialized to NULL for // all collectors but the CMS collector). BoolObjectClosure* _is_alive_non_header; + // Soft ref clearing policies + // . the default policy + static ReferencePolicy* _default_soft_ref_policy; + // . the "clear all" policy + static ReferencePolicy* _always_clear_soft_ref_policy; + // . the current policy below is either one of the above + ReferencePolicy* _current_soft_ref_policy; + // The discovered ref lists themselves ! ! // The MT'ness degree of the queues below ! int _num_q; ! // Arrays of lists of oops, one per thread ! DiscoveredList* _discoveredSoftRefs; DiscoveredList* _discoveredWeakRefs; DiscoveredList* _discoveredFinalRefs; DiscoveredList* _discoveredPhantomRefs; public: int num_q() { return _num_q; } DiscoveredList* discovered_soft_refs() { return _discoveredSoftRefs; } ! static oop sentinel_ref() { return _sentinelRef; } ! static oop* adr_sentinel_ref() { return &_sentinelRef; } ! ReferencePolicy* setup_policy(bool always_clear) { ! _current_soft_ref_policy = always_clear ? ! 
_always_clear_soft_ref_policy : _default_soft_ref_policy; ! _current_soft_ref_policy->setup(); // snapshot the policy threshold ! return _current_soft_ref_policy; ! } public: // Process references with a certain reachability level. void process_discovered_reflist(DiscoveredList refs_lists[], ReferencePolicy* policy,
*** 99,147 **** VoidClosure* complete_gc); // Work methods used by the method process_discovered_reflist // Phase1: keep alive all those referents that are otherwise // dead but which must be kept alive by policy (and their closure). ! void process_phase1(DiscoveredList& refs_list_addr, ReferencePolicy* policy, BoolObjectClosure* is_alive, OopClosure* keep_alive, VoidClosure* complete_gc); // Phase2: remove all those references whose referents are // reachable. ! inline void process_phase2(DiscoveredList& refs_list_addr, BoolObjectClosure* is_alive, OopClosure* keep_alive, VoidClosure* complete_gc) { if (discovery_is_atomic()) { // complete_gc is ignored in this case for this phase ! pp2_work(refs_list_addr, is_alive, keep_alive); } else { assert(complete_gc != NULL, "Error"); ! pp2_work_concurrent_discovery(refs_list_addr, is_alive, keep_alive, complete_gc); } } // Work methods in support of process_phase2 ! void pp2_work(DiscoveredList& refs_list_addr, BoolObjectClosure* is_alive, OopClosure* keep_alive); void pp2_work_concurrent_discovery( ! DiscoveredList& refs_list_addr, BoolObjectClosure* is_alive, OopClosure* keep_alive, VoidClosure* complete_gc); // Phase3: process the referents by either clearing them // or keeping them alive (and their closure) ! void process_phase3(DiscoveredList& refs_list_addr, bool clear_referent, BoolObjectClosure* is_alive, OopClosure* keep_alive, VoidClosure* complete_gc); // Enqueue references with a certain reachability level ! void enqueue_discovered_reflist(DiscoveredList& refs_list, oop* pending_list_addr); // "Preclean" all the discovered reference lists // by removing references with strongly reachable referents. 
// The first argument is a predicate on an oop that indicates // its (strong) reachability and the second is a closure that --- 123,171 ---- VoidClosure* complete_gc); // Work methods used by the method process_discovered_reflist // Phase1: keep alive all those referents that are otherwise // dead but which must be kept alive by policy (and their closure). ! void process_phase1(DiscoveredList& refs_list, ReferencePolicy* policy, BoolObjectClosure* is_alive, OopClosure* keep_alive, VoidClosure* complete_gc); // Phase2: remove all those references whose referents are // reachable. ! inline void process_phase2(DiscoveredList& refs_list, BoolObjectClosure* is_alive, OopClosure* keep_alive, VoidClosure* complete_gc) { if (discovery_is_atomic()) { // complete_gc is ignored in this case for this phase ! pp2_work(refs_list, is_alive, keep_alive); } else { assert(complete_gc != NULL, "Error"); ! pp2_work_concurrent_discovery(refs_list, is_alive, keep_alive, complete_gc); } } // Work methods in support of process_phase2 ! void pp2_work(DiscoveredList& refs_list, BoolObjectClosure* is_alive, OopClosure* keep_alive); void pp2_work_concurrent_discovery( ! DiscoveredList& refs_list, BoolObjectClosure* is_alive, OopClosure* keep_alive, VoidClosure* complete_gc); // Phase3: process the referents by either clearing them // or keeping them alive (and their closure) ! void process_phase3(DiscoveredList& refs_list, bool clear_referent, BoolObjectClosure* is_alive, OopClosure* keep_alive, VoidClosure* complete_gc); // Enqueue references with a certain reachability level ! void enqueue_discovered_reflist(DiscoveredList& refs_list, HeapWord* pending_list_addr); // "Preclean" all the discovered reference lists // by removing references with strongly reachable referents. // The first argument is a predicate on an oop that indicates // its (strong) reachability and the second is a closure that
*** 170,204 **** // Returns the name of the discovered reference list // occupying the i / _num_q slot. const char* list_name(int i); protected: // "Preclean" the given discovered reference list // by removing references with strongly reachable referents. // Currently used in support of CMS only. void preclean_discovered_reflist(DiscoveredList& refs_list, BoolObjectClosure* is_alive, OopClosure* keep_alive, VoidClosure* complete_gc, YieldClosure* yield); - void enqueue_discovered_reflists(oop* pending_list_addr, AbstractRefProcTaskExecutor* task_executor); int next_id() { int id = _next_id; if (++_next_id == _num_q) { _next_id = 0; } return id; } DiscoveredList* get_discovered_list(ReferenceType rt); inline void add_to_discovered_list_mt(DiscoveredList& refs_list, oop obj, ! oop* discovered_addr); void verify_ok_to_handle_reflists() PRODUCT_RETURN; void abandon_partial_discovered_list(DiscoveredList& refs_list); - void abandon_partial_discovered_list_arr(DiscoveredList refs_lists[]); // Calculate the number of jni handles. unsigned int count_jni_refs(); // Balances reference queues. --- 194,228 ---- // Returns the name of the discovered reference list // occupying the i / _num_q slot. const char* list_name(int i); + void enqueue_discovered_reflists(HeapWord* pending_list_addr, AbstractRefProcTaskExecutor* task_executor); + protected: // "Preclean" the given discovered reference list // by removing references with strongly reachable referents. // Currently used in support of CMS only. void preclean_discovered_reflist(DiscoveredList& refs_list, BoolObjectClosure* is_alive, OopClosure* keep_alive, VoidClosure* complete_gc, YieldClosure* yield); int next_id() { int id = _next_id; if (++_next_id == _num_q) { _next_id = 0; } return id; } DiscoveredList* get_discovered_list(ReferenceType rt); inline void add_to_discovered_list_mt(DiscoveredList& refs_list, oop obj, ! 
HeapWord* discovered_addr); void verify_ok_to_handle_reflists() PRODUCT_RETURN; void abandon_partial_discovered_list(DiscoveredList& refs_list); // Calculate the number of jni handles. unsigned int count_jni_refs(); // Balances reference queues.
*** 215,243 **** _discoveredFinalRefs(NULL), _discoveredPhantomRefs(NULL), _discovering_refs(false), _discovery_is_atomic(true), _enqueuing_is_done(false), _discovery_is_mt(false), _is_alive_non_header(NULL), _num_q(0), _processing_is_mt(false), _next_id(0) {} ReferenceProcessor(MemRegion span, bool atomic_discovery, ! bool mt_discovery, int mt_degree = 1, ! bool mt_processing = false); // Allocates and initializes a reference processor. static ReferenceProcessor* create_ref_processor( MemRegion span, bool atomic_discovery, bool mt_discovery, BoolObjectClosure* is_alive_non_header = NULL, int parallel_gc_threads = 1, ! bool mt_processing = false); ! // RefDiscoveryPolicy values enum { ReferenceBasedDiscovery = 0, ReferentBasedDiscovery = 1 }; --- 239,271 ---- _discoveredFinalRefs(NULL), _discoveredPhantomRefs(NULL), _discovering_refs(false), _discovery_is_atomic(true), _enqueuing_is_done(false), _discovery_is_mt(false), + _discovered_list_needs_barrier(false), + _bs(NULL), _is_alive_non_header(NULL), _num_q(0), _processing_is_mt(false), _next_id(0) {} ReferenceProcessor(MemRegion span, bool atomic_discovery, ! bool mt_discovery, ! int mt_degree = 1, ! bool mt_processing = false, ! bool discovered_list_needs_barrier = false); // Allocates and initializes a reference processor. static ReferenceProcessor* create_ref_processor( MemRegion span, bool atomic_discovery, bool mt_discovery, BoolObjectClosure* is_alive_non_header = NULL, int parallel_gc_threads = 1, ! bool mt_processing = false, ! bool discovered_list_needs_barrier = false); // RefDiscoveryPolicy values enum { ReferenceBasedDiscovery = 0, ReferentBasedDiscovery = 1 };
*** 284,303 **** // Discover a Reference object, using appropriate discovery criteria bool discover_reference(oop obj, ReferenceType rt); // Process references found during GC (called by the garbage collector) ! void process_discovered_references(ReferencePolicy* policy, ! BoolObjectClosure* is_alive, OopClosure* keep_alive, VoidClosure* complete_gc, AbstractRefProcTaskExecutor* task_executor); public: // Enqueue references at end of GC (called by the garbage collector) bool enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor = NULL); // debugging void verify_no_references_recorded() PRODUCT_RETURN; static void verify(); // clear the discovered lists (unlinking each entry). --- 312,335 ---- // Discover a Reference object, using appropriate discovery criteria bool discover_reference(oop obj, ReferenceType rt); // Process references found during GC (called by the garbage collector) ! void process_discovered_references(BoolObjectClosure* is_alive, OopClosure* keep_alive, VoidClosure* complete_gc, AbstractRefProcTaskExecutor* task_executor); public: // Enqueue references at end of GC (called by the garbage collector) bool enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor = NULL); + // If a discovery is in process that is being superseded, abandon it: all + // the discovered lists will be empty, and all the objects on them will + // have NULL discovered fields. Must be called only at a safepoint. + void abandon_partial_discovery(); + // debugging void verify_no_references_recorded() PRODUCT_RETURN; static void verify(); // clear the discovered lists (unlinking each entry).
*** 478,488 **** // Abstract reference processing task to execute. class AbstractRefProcTaskExecutor::EnqueueTask { protected: EnqueueTask(ReferenceProcessor& ref_processor, DiscoveredList refs_lists[], ! oop* pending_list_addr, oop sentinel_ref, int n_queues) : _ref_processor(ref_processor), _refs_lists(refs_lists), _pending_list_addr(pending_list_addr), --- 510,520 ---- // Abstract reference processing task to execute. class AbstractRefProcTaskExecutor::EnqueueTask { protected: EnqueueTask(ReferenceProcessor& ref_processor, DiscoveredList refs_lists[], ! HeapWord* pending_list_addr, oop sentinel_ref, int n_queues) : _ref_processor(ref_processor), _refs_lists(refs_lists), _pending_list_addr(pending_list_addr),
*** 494,503 **** virtual void work(unsigned int work_id) = 0; protected: ReferenceProcessor& _ref_processor; DiscoveredList* _refs_lists; ! oop* _pending_list_addr; oop _sentinel_ref; int _n_queues; }; --- 526,535 ---- virtual void work(unsigned int work_id) = 0; protected: ReferenceProcessor& _ref_processor; DiscoveredList* _refs_lists; ! HeapWord* _pending_list_addr; oop _sentinel_ref; int _n_queues; };