src/share/vm/memory/referenceProcessor.hpp

rev 2652 : 7085906: Replace the permgen-allocated sentinelRef with a self-looped end
Summary: Remove the sentinelRef and let the last Reference in a discovered chain point back to itself.
Reviewed-by: TBD1, TBD2
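
The gist of the change, as a minimal sketch of the end-of-list test before and after (the loop shape and locals are illustrative, not quoted from the patch; java_lang_ref_Reference::discovered is HotSpot's accessor for the Reference.discovered field):

    // Before: every discovered list ended at a shared permgen sentinel, so
    // traversal compared each element against sentinel_ref():
    //   while (ref != ReferenceProcessor::sentinel_ref()) { ... }
    //
    // After: the last Reference's discovered field refers back to the
    // Reference itself, so no sentinel object is needed. Assuming a
    // non-empty list:
    oop ref = list_head;
    while (true) {
      // ... examine ref ...
      oop next = java_lang_ref_Reference::discovered(ref);
      if (next == ref) break;   // self-loop: ref was the last element
      ref = next;
    }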


  35 // concurrently and/or incrementally.  Note, however, that the
  36 // ReferenceProcessor class abstracts away from a generational setting
  37 // by using only a heap interval (called "span" below), thus allowing
  38 // its use in a straightforward manner in a general, non-generational
  39 // setting.
  40 //
  41 // The basic idea is that each ReferenceProcessor object concerns
  42 // itself with ("weak") reference processing in a specific "span"
  43 // of the heap of interest to a specific collector. Currently,
  44 // the span is a convex interval of the heap, but, efficiency
  45 // aside, there seems to be no reason it couldn't be extended
  46 // (with appropriate modifications) to any "non-convex interval".
  47 
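For instance, a young-generation collector could confine discovery to the young generation's reserved interval. A minimal sketch, where young_bottom and young_end are hypothetical HeapWord* bounds:

    // the span is just a (right-open) MemRegion over the heap
    MemRegion young_span(young_bottom, young_end);   // covers [bottom, end)
    // a ReferenceProcessor whose _span is young_span will only discover
    // Reference objects found inside that interval
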
  48 // forward references
  49 class ReferencePolicy;
  50 class AbstractRefProcTaskExecutor;
  51 class DiscoveredList;
  52 
  53 class ReferenceProcessor : public CHeapObj {
  54  protected:
  55   // End of list marker
  56   static oop  _sentinelRef;
  57   MemRegion   _span; // (right-open) interval of heap
  58                      // subject to weak ref discovery
  59   bool        _discovering_refs;      // true when discovery enabled
  60   bool        _discovery_is_atomic;   // if discovery is atomic wrt
  61                                       // other collectors in configuration
  62   bool        _discovery_is_mt;       // true if reference discovery is MT.
  63   // If true, setting "next" field of a discovered refs list requires
  64   // write barrier(s).  (Must be true if used in a collector in which
  65   // elements of a discovered list may be moved during discovery: for
  66   // example, a collector like Garbage-First that moves objects during a
  67   // long-term concurrent marking phase that does weak reference
  68   // discovery.)
  69   bool        _discovered_list_needs_barrier;
  70   BarrierSet* _bs;                    // Cached copy of BarrierSet.
  71   bool        _enqueuing_is_done;     // true if all weak references enqueued
  72   bool        _processing_is_mt;      // true during phases when
  73                                       // reference processing is MT.
  74   int         _next_id;               // round-robin mod _num_q counter in
  75                                       // support of work distribution
  76 
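
The _discovered_list_needs_barrier flag implies a store-then-notify pattern whenever a discovered-list link field is written. A hedged sketch of that pattern (link_discovered and the raw pointer store are illustrative stand-ins for the VM's store primitives; BarrierSet::write_ref_field is the notification hook cached in _bs):

    static void link_discovered(HeapWord* discovered_addr, oop value,
                                bool needs_barrier, BarrierSet* bs) {
      *(oop*)discovered_addr = value;   // raw store of the list link
      if (needs_barrier) {
        // e.g. G1 SATB/remembered-set work, or card dirtying
        bs->write_ref_field((void*)discovered_addr, value);
      }
    }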


  89   // . the current policy below is one of the two above
  90   ReferencePolicy*          _current_soft_ref_policy;
  91 
  92   // The discovered ref lists themselves
  93 
  94   // The active MT'ness degree of the queues below
  95   int             _num_q;
  96   // The maximum MT'ness degree of the queues below
  97   int             _max_num_q;
  98   // Arrays of lists of oops, one per thread
  99   DiscoveredList* _discoveredSoftRefs;
 100   DiscoveredList* _discoveredWeakRefs;
 101   DiscoveredList* _discoveredFinalRefs;
 102   DiscoveredList* _discoveredPhantomRefs;
 103 
 104  public:
 105   int num_q()                            { return _num_q; }
 106   int max_num_q()                        { return _max_num_q; }
 107   void set_active_mt_degree(int v)       { _num_q = v; }
 108   DiscoveredList* discovered_soft_refs() { return _discoveredSoftRefs; }
 109   static oop  sentinel_ref()             { return _sentinelRef; }
 110   static oop* adr_sentinel_ref()         { return &_sentinelRef; }
 111   ReferencePolicy* setup_policy(bool always_clear) {
 112     _current_soft_ref_policy = always_clear ?
 113       _always_clear_soft_ref_policy : _default_soft_ref_policy;
 114     _current_soft_ref_policy->setup();   // snapshot the policy threshold
 115     return _current_soft_ref_policy;
 116   }
 117 
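The intended call pattern, as a sketch (rp and clear_all_soft_refs are hypothetical locals at the collector's entry point):

    // choose and snapshot the soft-ref clearing policy exactly once per
    // collection, before any discovered SoftReferences are examined; the
    // snapshot keeps the policy's threshold stable for the whole cycle
    ReferencePolicy* policy = rp->setup_policy(clear_all_soft_refs);
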
 118  public:
 119   // Process references with a certain reachability level.
 120   void process_discovered_reflist(DiscoveredList               refs_lists[],
 121                                   ReferencePolicy*             policy,
 122                                   bool                         clear_referent,
 123                                   BoolObjectClosure*           is_alive,
 124                                   OopClosure*                  keep_alive,
 125                                   VoidClosure*                 complete_gc,
 126                                   AbstractRefProcTaskExecutor* task_executor);
 127 
 128   void process_phaseJNI(BoolObjectClosure* is_alive,
 129                         OopClosure*        keep_alive,
 130                         VoidClosure*       complete_gc);


 213   // Currently used in support of CMS only.
 214   void preclean_discovered_reflist(DiscoveredList&    refs_list,
 215                                    BoolObjectClosure* is_alive,
 216                                    OopClosure*        keep_alive,
 217                                    VoidClosure*       complete_gc,
 218                                    YieldClosure*      yield);
 219 
 220   // round-robin mod _num_q (note: _not_ mod _max_num_q)
 221   int next_id() {
 222     int id = _next_id;
 223     if (++_next_id == _num_q) {
 224       _next_id = 0;
 225     }
 226     return id;
 227   }
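A worked example of the round-robin behaviour:

    // with _num_q == 4 (even if _max_num_q == 8), successive next_id()
    // calls return 0, 1, 2, 3, 0, 1, ... so newly discovered references
    // are spread over only the currently active queues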
 228   DiscoveredList* get_discovered_list(ReferenceType rt);
 229   inline void add_to_discovered_list_mt(DiscoveredList& refs_list, oop obj,
 230                                         HeapWord* discovered_addr);
 231   void verify_ok_to_handle_reflists() PRODUCT_RETURN;
 232 

 233   void abandon_partial_discovered_list(DiscoveredList& refs_list);
 234 
 235   // Calculate the number of JNI handles.
 236   unsigned int count_jni_refs();
 237 
 238   // Balances reference queues.
 239   void balance_queues(DiscoveredList ref_lists[]);
 240 
 241   // Update (advance) the soft ref master clock field.
 242   void update_soft_ref_master_clock();
 243 
 244  public:
 245   // constructor
 246   ReferenceProcessor():
 247     _span((HeapWord*)NULL, (HeapWord*)NULL),
 248     _discoveredSoftRefs(NULL),  _discoveredWeakRefs(NULL),
 249     _discoveredFinalRefs(NULL), _discoveredPhantomRefs(NULL),
 250     _discovering_refs(false),
 251     _discovery_is_atomic(true),
 252     _enqueuing_is_done(false),


 297   bool discovery_enabled()  { return _discovering_refs;  }
 298 
 299   // whether discovery is atomic wrt other collectors
 300   bool discovery_is_atomic() const { return _discovery_is_atomic; }
 301   void set_atomic_discovery(bool atomic) { _discovery_is_atomic = atomic; }
 302 
 303   // whether discovery is done by multiple threads simultaneously
 304   bool discovery_is_mt() const { return _discovery_is_mt; }
 305   void set_mt_discovery(bool mt) { _discovery_is_mt = mt; }
 306 
 307   // Whether we are in a phase when _processing_ is MT.
 308   bool processing_is_mt() const { return _processing_is_mt; }
 309   void set_mt_processing(bool mt) { _processing_is_mt = mt; }
 310 
 311   // whether all enqueuing of weak references is complete
 312   bool enqueuing_is_done()  { return _enqueuing_is_done; }
 313   void set_enqueuing_is_done(bool v) { _enqueuing_is_done = v; }
 314 
 315   // iterate over oops
 316   void weak_oops_do(OopClosure* f);       // weak roots
 317   static void oops_do(OopClosure* f);     // strong root(s)
 318 
 319   // Balance each of the discovered lists.
 320   void balance_all_queues();
 321 
 322   // Discover a Reference object, using appropriate discovery criteria
 323   bool discover_reference(oop obj, ReferenceType rt);
 324 
 325   // Process references found during GC (called by the garbage collector)
 326   void process_discovered_references(BoolObjectClosure*           is_alive,
 327                                      OopClosure*                  keep_alive,
 328                                      VoidClosure*                 complete_gc,
 329                                      AbstractRefProcTaskExecutor* task_executor);
 330 
 331  public:
 332   // Enqueue references at end of GC (called by the garbage collector)
 333   bool enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor = NULL);
 334 
 335   // If a discovery is in progress that is being superseded, abandon it: all
 336   // the discovered lists will be empty, and all the objects on them will
 337   // have NULL discovered fields.  Must be called only at a safepoint.
 338   void abandon_partial_discovery();
 339 
 340   // debugging
 341   void verify_no_references_recorded() PRODUCT_RETURN;
 342   void verify_referent(oop obj)        PRODUCT_RETURN;
 343   static void verify();
 344 
 345   // clear the discovered lists (unlinking each entry).
 346   void clear_discovered_references() PRODUCT_RETURN;
 347 };
 348 
 349 // A utility class to disable reference discovery in
 350 // the scope which contains it, for the given ReferenceProcessor.
 351 class NoRefDiscovery: StackObj {
 352  private:
 353   ReferenceProcessor* _rp;
 354   bool _was_discovering_refs;
 355  public:
 356   NoRefDiscovery(ReferenceProcessor* rp) : _rp(rp) {
 357     _was_discovering_refs = _rp->discovery_enabled();
 358     if (_was_discovering_refs) {
 359       _rp->disable_discovery();
 360     }
 361   }
 362 
 363   ~NoRefDiscovery() {

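Typical scope-based use of NoRefDiscovery, as a sketch (rp is a hypothetical ReferenceProcessor*; the destructor, elided above, presumably restores the saved state):

    {
      NoRefDiscovery no_discovery(rp);
      // ... work during which Reference discovery must stay off ...
    }   // on scope exit, discovery is re-enabled iff it was enabled on entry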

 507   virtual void work(unsigned int work_id, BoolObjectClosure& is_alive,
 508                     OopClosure& keep_alive,
 509                     VoidClosure& complete_gc) = 0;
 510 
 511   // Returns true if a task marks some oops as alive.
 512   bool marks_oops_alive() const
 513   { return _marks_oops_alive; }
 514 
 515 protected:
 516   ReferenceProcessor& _ref_processor;
 517   DiscoveredList*     _refs_lists;
 518   const bool          _marks_oops_alive;
 519 };
 520 
 521 // Abstract reference processing task to execute.
 522 class AbstractRefProcTaskExecutor::EnqueueTask {
 523 protected:
 524   EnqueueTask(ReferenceProcessor& ref_processor,
 525               DiscoveredList      refs_lists[],
 526               HeapWord*           pending_list_addr,
 527               oop                 sentinel_ref,
 528               int                 n_queues)
 529     : _ref_processor(ref_processor),
 530       _refs_lists(refs_lists),
 531       _pending_list_addr(pending_list_addr),
 532       _sentinel_ref(sentinel_ref),
 533       _n_queues(n_queues)
 534   { }
 535 
 536 public:
 537   virtual void work(unsigned int work_id) = 0;
 538 
 539 protected:
 540   ReferenceProcessor& _ref_processor;
 541   DiscoveredList*     _refs_lists;
 542   HeapWord*           _pending_list_addr;
 543   oop                 _sentinel_ref;
 544   int                 _n_queues;
 545 };
 546 
 547 #endif // SHARE_VM_MEMORY_REFERENCEPROCESSOR_HPP


  35 // concurrently and/or incrementally.  Note, however, that the
  36 // ReferenceProcessor class abstracts away from a generational setting
  37 // by using only a heap interval (called "span" below), thus allowing
  38 // its use in a straightforward manner in a general, non-generational
  39 // setting.
  40 //
  41 // The basic idea is that each ReferenceProcessor object concerns
  42 // itself with ("weak") reference processing in a specific "span"
  43 // of the heap of interest to a specific collector. Currently,
  44 // the span is a convex interval of the heap, but, efficiency
  45 // aside, there seems to be no reason it couldn't be extended
  46 // (with appropriate modifications) to any "non-convex interval".
  47 
  48 // forward references
  49 class ReferencePolicy;
  50 class AbstractRefProcTaskExecutor;
  51 class DiscoveredList;
  52 
  53 class ReferenceProcessor : public CHeapObj {
  54  protected:


  55   MemRegion   _span; // (right-open) interval of heap
  56                      // subject to weak ref discovery
  57   bool        _discovering_refs;      // true when discovery enabled
  58   bool        _discovery_is_atomic;   // if discovery is atomic wrt
  59                                       // other collectors in configuration
  60   bool        _discovery_is_mt;       // true if reference discovery is MT.
  61   // If true, setting "next" field of a discovered refs list requires
  62   // write barrier(s).  (Must be true if used in a collector in which
  63   // elements of a discovered list may be moved during discovery: for
  64   // example, a collector like Garbage-First that moves objects during a
  65   // long-term concurrent marking phase that does weak reference
  66   // discovery.)
  67   bool        _discovered_list_needs_barrier;
  68   BarrierSet* _bs;                    // Cached copy of BarrierSet.
  69   bool        _enqueuing_is_done;     // true if all weak references enqueued
  70   bool        _processing_is_mt;      // true during phases when
  71                                       // reference processing is MT.
  72   int         _next_id;               // round-robin mod _num_q counter in
  73                                       // support of work distribution
  74 


  87   // . the current policy below is one of the two above
  88   ReferencePolicy*          _current_soft_ref_policy;
  89 
  90   // The discovered ref lists themselves
  91 
  92   // The active MT'ness degree of the queues below
  93   int             _num_q;
  94   // The maximum MT'ness degree of the queues below
  95   int             _max_num_q;
  96   // Arrays of lists of oops, one per thread
  97   DiscoveredList* _discoveredSoftRefs;
  98   DiscoveredList* _discoveredWeakRefs;
  99   DiscoveredList* _discoveredFinalRefs;
 100   DiscoveredList* _discoveredPhantomRefs;
 101 
 102  public:
 103   int num_q()                            { return _num_q; }
 104   int max_num_q()                        { return _max_num_q; }
 105   void set_active_mt_degree(int v)       { _num_q = v; }
 106   DiscoveredList* discovered_soft_refs() { return _discoveredSoftRefs; }


 107   ReferencePolicy* setup_policy(bool always_clear) {
 108     _current_soft_ref_policy = always_clear ?
 109       _always_clear_soft_ref_policy : _default_soft_ref_policy;
 110     _current_soft_ref_policy->setup();   // snapshot the policy threshold
 111     return _current_soft_ref_policy;
 112   }
 113 
 114  public:
 115   // Process references with a certain reachability level.
 116   void process_discovered_reflist(DiscoveredList               refs_lists[],
 117                                   ReferencePolicy*             policy,
 118                                   bool                         clear_referent,
 119                                   BoolObjectClosure*           is_alive,
 120                                   OopClosure*                  keep_alive,
 121                                   VoidClosure*                 complete_gc,
 122                                   AbstractRefProcTaskExecutor* task_executor);
 123 
 124   void process_phaseJNI(BoolObjectClosure* is_alive,
 125                         OopClosure*        keep_alive,
 126                         VoidClosure*       complete_gc);


 209   // Currently used in support of CMS only.
 210   void preclean_discovered_reflist(DiscoveredList&    refs_list,
 211                                    BoolObjectClosure* is_alive,
 212                                    OopClosure*        keep_alive,
 213                                    VoidClosure*       complete_gc,
 214                                    YieldClosure*      yield);
 215 
 216   // round-robin mod _num_q (note: _not_ mod _max_num_q)
 217   int next_id() {
 218     int id = _next_id;
 219     if (++_next_id == _num_q) {
 220       _next_id = 0;
 221     }
 222     return id;
 223   }
 224   DiscoveredList* get_discovered_list(ReferenceType rt);
 225   inline void add_to_discovered_list_mt(DiscoveredList& refs_list, oop obj,
 226                                         HeapWord* discovered_addr);
 227   void verify_ok_to_handle_reflists() PRODUCT_RETURN;
 228 
 229   void clear_discovered_references(DiscoveredList& refs_list);
 230   void abandon_partial_discovered_list(DiscoveredList& refs_list);
 231 
 232   // Calculate the number of JNI handles.
 233   unsigned int count_jni_refs();
 234 
 235   // Balances reference queues.
 236   void balance_queues(DiscoveredList ref_lists[]);
 237 
 238   // Update (advance) the soft ref master clock field.
 239   void update_soft_ref_master_clock();
 240 
 241  public:
 242   // constructor
 243   ReferenceProcessor():
 244     _span((HeapWord*)NULL, (HeapWord*)NULL),
 245     _discoveredSoftRefs(NULL),  _discoveredWeakRefs(NULL),
 246     _discoveredFinalRefs(NULL), _discoveredPhantomRefs(NULL),
 247     _discovering_refs(false),
 248     _discovery_is_atomic(true),
 249     _enqueuing_is_done(false),


 294   bool discovery_enabled()  { return _discovering_refs;  }
 295 
 296   // whether discovery is atomic wrt other collectors
 297   bool discovery_is_atomic() const { return _discovery_is_atomic; }
 298   void set_atomic_discovery(bool atomic) { _discovery_is_atomic = atomic; }
 299 
 300   // whether discovery is done by multiple threads simultaneously
 301   bool discovery_is_mt() const { return _discovery_is_mt; }
 302   void set_mt_discovery(bool mt) { _discovery_is_mt = mt; }
 303 
 304   // Whether we are in a phase when _processing_ is MT.
 305   bool processing_is_mt() const { return _processing_is_mt; }
 306   void set_mt_processing(bool mt) { _processing_is_mt = mt; }
 307 
 308   // whether all enqueuing of weak references is complete
 309   bool enqueuing_is_done()  { return _enqueuing_is_done; }
 310   void set_enqueuing_is_done(bool v) { _enqueuing_is_done = v; }
 311 
 312   // iterate over oops
 313   void weak_oops_do(OopClosure* f);       // weak roots

 314 
 315   // Balance each of the discovered lists.
 316   void balance_all_queues();
 317 
 318   // Discover a Reference object, using appropriate discovery criteria
 319   bool discover_reference(oop obj, ReferenceType rt);
 320 
 321   // Process references found during GC (called by the garbage collector)
 322   void process_discovered_references(BoolObjectClosure*           is_alive,
 323                                      OopClosure*                  keep_alive,
 324                                      VoidClosure*                 complete_gc,
 325                                      AbstractRefProcTaskExecutor* task_executor);
 326 
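Putting the public pieces together, a hedged sketch of the sequence a collector drives per collection (closure names are placeholders; a NULL executor means single-threaded):

    rp->setup_policy(clear_all_soft_refs);         // snapshot soft-ref policy
    // ... trace the heap; Reference objects are found via discover_reference() ...
    rp->process_discovered_references(&is_alive,   // identifies live objects
                                      &keep_alive, // keeps referents alive
                                      &complete_gc,// completes transitive closure
                                      NULL);
    // at the very end of the GC:
    rp->enqueue_discovered_references();           // splice onto the pending list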
 327  public:
 328   // Enqueue references at end of GC (called by the garbage collector)
 329   bool enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor = NULL);
 330 
 331   // If a discovery is in progress that is being superseded, abandon it: all
 332   // the discovered lists will be empty, and all the objects on them will
 333   // have NULL discovered fields.  Must be called only at a safepoint.
 334   void abandon_partial_discovery();
 335 
 336   // debugging
 337   void verify_no_references_recorded() PRODUCT_RETURN;
 338   void verify_referent(oop obj)        PRODUCT_RETURN;

 339 
 340   // clear the discovered lists (unlinking each entry).
 341   void clear_discovered_references() PRODUCT_RETURN;
 342 };
 343 
 344 // A utility class to disable reference discovery in
 345 // the scope which contains it, for the given ReferenceProcessor.
 346 class NoRefDiscovery: StackObj {
 347  private:
 348   ReferenceProcessor* _rp;
 349   bool _was_discovering_refs;
 350  public:
 351   NoRefDiscovery(ReferenceProcessor* rp) : _rp(rp) {
 352     _was_discovering_refs = _rp->discovery_enabled();
 353     if (_was_discovering_refs) {
 354       _rp->disable_discovery();
 355     }
 356   }
 357 
 358   ~NoRefDiscovery() {


 502   virtual void work(unsigned int work_id, BoolObjectClosure& is_alive,
 503                     OopClosure& keep_alive,
 504                     VoidClosure& complete_gc) = 0;
 505 
 506   // Returns true if a task marks some oops as alive.
 507   bool marks_oops_alive() const
 508   { return _marks_oops_alive; }
 509 
 510 protected:
 511   ReferenceProcessor& _ref_processor;
 512   DiscoveredList*     _refs_lists;
 513   const bool          _marks_oops_alive;
 514 };
 515 
 516 // Abstract reference processing task to execute.
 517 class AbstractRefProcTaskExecutor::EnqueueTask {
 518 protected:
 519   EnqueueTask(ReferenceProcessor& ref_processor,
 520               DiscoveredList      refs_lists[],
 521               HeapWord*           pending_list_addr,

 522               int                 n_queues)
 523     : _ref_processor(ref_processor),
 524       _refs_lists(refs_lists),
 525       _pending_list_addr(pending_list_addr),

 526       _n_queues(n_queues)
 527   { }
 528 
 529 public:
 530   virtual void work(unsigned int work_id) = 0;
 531 
 532 protected:
 533   ReferenceProcessor& _ref_processor;
 534   DiscoveredList*     _refs_lists;
 535   HeapWord*           _pending_list_addr;

 536   int                 _n_queues;
 537 };
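
A hypothetical concrete EnqueueTask matching the new four-argument constructor (note the removed sentinel_ref parameter); the class name and work() body are illustrative only:

    class ExampleEnqueueTask : public AbstractRefProcTaskExecutor::EnqueueTask {
    public:
      ExampleEnqueueTask(ReferenceProcessor& rp, DiscoveredList lists[],
                         HeapWord* pending_list_addr, int n_queues)
        : EnqueueTask(rp, lists, pending_list_addr, n_queues) { }
      virtual void work(unsigned int work_id) {
        // enqueue everything on _refs_lists[work_id], splicing the chain
        // onto the pending list rooted at _pending_list_addr
      }
    };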
 538 
 539 #endif // SHARE_VM_MEMORY_REFERENCEPROCESSOR_HPP