src/share/vm/memory/referenceProcessor.hpp

Old version:

 protected:
  // End of list marker
  static oop  _sentinelRef;
  MemRegion   _span; // (right-open) interval of heap
                     // subject to wkref discovery
  bool        _discovering_refs;      // true when discovery enabled
  bool        _discovery_is_atomic;   // if discovery is atomic wrt
                                      // other collectors in configuration
  bool        _discovery_is_mt;       // true if reference discovery is MT.
  // If true, setting "next" field of a discovered refs list requires
  // write barrier(s).  (Must be true if used in a collector in which
  // elements of a discovered list may be moved during discovery: for
  // example, a collector like Garbage-First that moves objects during a
  // long-term concurrent marking phase that does weak reference
  // discovery.)
  bool        _discovered_list_needs_barrier;
  BarrierSet* _bs;                    // Cached copy of BarrierSet.
  bool        _enqueuing_is_done;     // true if all weak references enqueued
  bool        _processing_is_mt;      // true during phases when
                                      // reference processing is MT.
  int         _next_id;               // round-robin counter in
                                      // support of work distribution

  // For collectors that do not keep GC marking information
  // in the object header, this field holds a closure that
  // helps the reference processor determine the reachability
  // of an oop (the field is currently initialized to NULL for
  // all collectors but the CMS collector).
  BoolObjectClosure* _is_alive_non_header;

  // Soft ref clearing policies
  // . the default policy
  static ReferencePolicy*   _default_soft_ref_policy;
  // . the "clear all" policy
  static ReferencePolicy*   _always_clear_soft_ref_policy;
  // . the current policy below is either one of the above
  ReferencePolicy*          _current_soft_ref_policy;

  // The discovered ref lists themselves

  // The active MT'ness degree of the queues below
  int             _num_q;
  // The maximum MT'ness degree of the queues below
  int             _max_num_q;
  // Arrays of lists of oops, one per thread
  DiscoveredList* _discoveredSoftRefs;
  DiscoveredList* _discoveredWeakRefs;
  DiscoveredList* _discoveredFinalRefs;
  DiscoveredList* _discoveredPhantomRefs;

 public:
  int num_q()                            { return _num_q; }
  void set_mt_degree(int v)              { _num_q = v; }
  DiscoveredList* discovered_soft_refs() { return _discoveredSoftRefs; }
  static oop  sentinel_ref()             { return _sentinelRef; }
  static oop* adr_sentinel_ref()         { return &_sentinelRef; }
  ReferencePolicy* setup_policy(bool always_clear) {
    _current_soft_ref_policy = always_clear ?
      _always_clear_soft_ref_policy : _default_soft_ref_policy;
    _current_soft_ref_policy->setup();   // snapshot the policy threshold
    return _current_soft_ref_policy;
  }

 public:
  // Process references with a certain reachability level.
  void process_discovered_reflist(DiscoveredList               refs_lists[],
                                  ReferencePolicy*             policy,
                                  bool                         clear_referent,
                                  BoolObjectClosure*           is_alive,
                                  OopClosure*                  keep_alive,
                                  VoidClosure*                 complete_gc,
                                  AbstractRefProcTaskExecutor* task_executor);

// ...

  // if and only if its "next" field is NULL.
  void clean_up_discovered_references();
  void clean_up_discovered_reflist(DiscoveredList& refs_list);

  // Returns the name of the discovered reference list
  // occupying the i / _num_q slot.
  const char* list_name(int i);

  void enqueue_discovered_reflists(HeapWord* pending_list_addr, AbstractRefProcTaskExecutor* task_executor);

 protected:
  // "Preclean" the given discovered reference list
  // by removing references with strongly reachable referents.
  // Currently used in support of CMS only.
  void preclean_discovered_reflist(DiscoveredList&    refs_list,
                                   BoolObjectClosure* is_alive,
                                   OopClosure*        keep_alive,
                                   VoidClosure*       complete_gc,
                                   YieldClosure*      yield);

  int next_id() {
    int id = _next_id;
    if (++_next_id == _num_q) {
      _next_id = 0;
    }
    return id;
  }
  DiscoveredList* get_discovered_list(ReferenceType rt);
  inline void add_to_discovered_list_mt(DiscoveredList& refs_list, oop obj,
                                        HeapWord* discovered_addr);
  void verify_ok_to_handle_reflists() PRODUCT_RETURN;

  void abandon_partial_discovered_list(DiscoveredList& refs_list);

  // Calculate the number of jni handles.
  unsigned int count_jni_refs();

  // Balances reference queues.
  void balance_queues(DiscoveredList ref_lists[]);

  // Update (advance) the soft ref master clock field.
  void update_soft_ref_master_clock();

 public:
  // constructor
  ReferenceProcessor():
    _span((HeapWord*)NULL, (HeapWord*)NULL),
    _discoveredSoftRefs(NULL),  _discoveredWeakRefs(NULL),
    _discoveredFinalRefs(NULL), _discoveredPhantomRefs(NULL),
    _discovering_refs(false),
    _discovery_is_atomic(true),
    _enqueuing_is_done(false),
    _discovery_is_mt(false),
    _discovered_list_needs_barrier(false),
    _bs(NULL),
    _is_alive_non_header(NULL),
    _num_q(0),
    _max_num_q(0),
    _processing_is_mt(false),
    _next_id(0)
  {}

  ReferenceProcessor(MemRegion span, bool atomic_discovery,
                     bool mt_discovery,
                     int mt_degree = 1,
                     bool mt_processing = false,
                     bool discovered_list_needs_barrier = false);

  // Allocates and initializes a reference processor.
  static ReferenceProcessor* create_ref_processor(
    MemRegion          span,
    bool               atomic_discovery,
    bool               mt_discovery,
    BoolObjectClosure* is_alive_non_header = NULL,
    int                parallel_gc_threads = 1,
    bool               mt_processing = false,
    bool               discovered_list_needs_barrier = false);

  // RefDiscoveryPolicy values
  enum DiscoveryPolicy {
    ReferenceBasedDiscovery = 0,
    ReferentBasedDiscovery  = 1,
    DiscoveryPolicyMin      = ReferenceBasedDiscovery,
    DiscoveryPolicyMax      = ReferentBasedDiscovery
  };

  static void init_statics();

 public:
  // get and set "is_alive_non_header" field
  BoolObjectClosure* is_alive_non_header() {
    return _is_alive_non_header;
  }
  void set_is_alive_non_header(BoolObjectClosure* is_alive_non_header) {
    _is_alive_non_header = is_alive_non_header;
  }

// ...

// A utility class to temporarily mutate the span of the
// given ReferenceProcessor in the scope that contains it.
class ReferenceProcessorSpanMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  MemRegion           _saved_span;

 public:
  ReferenceProcessorSpanMutator(ReferenceProcessor* rp,
                                MemRegion span):
    _rp(rp) {
    _saved_span = _rp->span();
    _rp->set_span(span);
  }

  ~ReferenceProcessorSpanMutator() {
    _rp->set_span(_saved_span);
  }
};

// A utility class to temporarily change the MT'ness of
// reference discovery for the given ReferenceProcessor
// in the scope that contains it.
class ReferenceProcessorMTMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  bool                _saved_mt;

 public:
  ReferenceProcessorMTMutator(ReferenceProcessor* rp,
                              bool mt):
    _rp(rp) {
    _saved_mt = _rp->discovery_is_mt();
    _rp->set_mt_discovery(mt);
  }

  ~ReferenceProcessorMTMutator() {
    _rp->set_mt_discovery(_saved_mt);
  }
};

// A utility class to temporarily change the disposition
// of the "is_alive_non_header" closure field of the
// given ReferenceProcessor in the scope that contains it.
class ReferenceProcessorIsAliveMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  BoolObjectClosure*  _saved_cl;

 public:
  ReferenceProcessorIsAliveMutator(ReferenceProcessor* rp,
                                   BoolObjectClosure*  cl):
    _rp(rp) {
    _saved_cl = _rp->is_alive_non_header();
    _rp->set_is_alive_non_header(cl);
  }

  ~ReferenceProcessorIsAliveMutator() {
    _rp->set_is_alive_non_header(_saved_cl);
  }
};

New version:

 protected:
  // End of list marker
  static oop  _sentinelRef;
  MemRegion   _span; // (right-open) interval of heap
                     // subject to wkref discovery
  bool        _discovering_refs;      // true when discovery enabled
  bool        _discovery_is_atomic;   // if discovery is atomic wrt
                                      // other collectors in configuration
  bool        _discovery_is_mt;       // true if reference discovery is MT.
  // If true, setting "next" field of a discovered refs list requires
  // write barrier(s).  (Must be true if used in a collector in which
  // elements of a discovered list may be moved during discovery: for
  // example, a collector like Garbage-First that moves objects during a
  // long-term concurrent marking phase that does weak reference
  // discovery.)
  bool        _discovered_list_needs_barrier;
  BarrierSet* _bs;                    // Cached copy of BarrierSet.
  bool        _enqueuing_is_done;     // true if all weak references enqueued
  bool        _processing_is_mt;      // true during phases when
                                      // reference processing is MT.
  int         _next_id;               // round-robin mod _num_q counter in
                                      // support of work distribution

  // For collectors that do not keep GC marking information
  // in the object header, this field holds a closure that
  // helps the reference processor determine the reachability
  // of an oop (the field is currently initialized to NULL for
  // all collectors but the CMS collector).
  BoolObjectClosure* _is_alive_non_header;

  // Soft ref clearing policies
  // . the default policy
  static ReferencePolicy*   _default_soft_ref_policy;
  // . the "clear all" policy
  static ReferencePolicy*   _always_clear_soft_ref_policy;
  // . the current policy below is either one of the above
  ReferencePolicy*          _current_soft_ref_policy;

  // The discovered ref lists themselves

  // The active MT'ness degree of the queues below
  int             _num_q;
  // The maximum MT'ness degree of the queues below
  int             _max_num_q;
  // Arrays of lists of oops, one per thread
  DiscoveredList* _discoveredSoftRefs;
  DiscoveredList* _discoveredWeakRefs;
  DiscoveredList* _discoveredFinalRefs;
  DiscoveredList* _discoveredPhantomRefs;

 public:
  int num_q()                            { return _num_q; }
  int max_num_q()                        { return _max_num_q; }
  void set_active_mt_degree(int v)       { _num_q = v; }
  DiscoveredList* discovered_soft_refs() { return _discoveredSoftRefs; }
  static oop  sentinel_ref()             { return _sentinelRef; }
  static oop* adr_sentinel_ref()         { return &_sentinelRef; }
  ReferencePolicy* setup_policy(bool always_clear) {
    _current_soft_ref_policy = always_clear ?
      _always_clear_soft_ref_policy : _default_soft_ref_policy;
    _current_soft_ref_policy->setup();   // snapshot the policy threshold
    return _current_soft_ref_policy;
  }

 public:
  // Process references with a certain reachability level.
  void process_discovered_reflist(DiscoveredList               refs_lists[],
                                  ReferencePolicy*             policy,
                                  bool                         clear_referent,
                                  BoolObjectClosure*           is_alive,
                                  OopClosure*                  keep_alive,
                                  VoidClosure*                 complete_gc,
                                  AbstractRefProcTaskExecutor* task_executor);
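
  // Illustrative usage (not part of this file; the closure objects and the
  // rp/clear_all_soft_refs names are hypothetical stand-ins): a collector
  // typically selects the soft-ref policy once per cycle via setup_policy()
  // and then processes each reachability level in turn, e.g.
  //
  //   ReferencePolicy* policy = rp->setup_policy(clear_all_soft_refs);
  //   rp->process_discovered_reflist(rp->discovered_soft_refs(), policy,
  //                                  true /* clear_referent */,
  //                                  &is_alive, &keep_alive, &complete_gc,
  //                                  NULL /* run serially */);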

// ...

  // if and only if its "next" field is NULL.
  void clean_up_discovered_references();
  void clean_up_discovered_reflist(DiscoveredList& refs_list);

  // Returns the name of the discovered reference list
  // occupying the i / _num_q slot.
  const char* list_name(int i);

  void enqueue_discovered_reflists(HeapWord* pending_list_addr, AbstractRefProcTaskExecutor* task_executor);

 protected:
  // "Preclean" the given discovered reference list
  // by removing references with strongly reachable referents.
  // Currently used in support of CMS only.
  void preclean_discovered_reflist(DiscoveredList&    refs_list,
                                   BoolObjectClosure* is_alive,
                                   OopClosure*        keep_alive,
                                   VoidClosure*       complete_gc,
                                   YieldClosure*      yield);

  // round-robin mod _num_q (note: _not_ mod _max_num_q)
  int next_id() {
    int id = _next_id;
    if (++_next_id == _num_q) {
      _next_id = 0;
    }
    return id;
  }
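  // (Example: with _num_q == 3 the successive return values are
  // 0, 1, 2, 0, 1, ..., handing each caller the next queue index
  // and spreading discovered references evenly over the active queues.)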
  DiscoveredList* get_discovered_list(ReferenceType rt);
  inline void add_to_discovered_list_mt(DiscoveredList& refs_list, oop obj,
                                        HeapWord* discovered_addr);
  void verify_ok_to_handle_reflists() PRODUCT_RETURN;

  void abandon_partial_discovered_list(DiscoveredList& refs_list);

  // Calculate the number of jni handles.
  unsigned int count_jni_refs();

  // Balances reference queues.
  void balance_queues(DiscoveredList ref_lists[]);

  // Update (advance) the soft ref master clock field.
  void update_soft_ref_master_clock();

 public:
  // constructor
  ReferenceProcessor():
    _span((HeapWord*)NULL, (HeapWord*)NULL),
    _discoveredSoftRefs(NULL),  _discoveredWeakRefs(NULL),
    _discoveredFinalRefs(NULL), _discoveredPhantomRefs(NULL),
    _discovering_refs(false),
    _discovery_is_atomic(true),
    _enqueuing_is_done(false),
    _discovery_is_mt(false),
    _discovered_list_needs_barrier(false),
    _bs(NULL),
    _is_alive_non_header(NULL),
    _num_q(0),
    _max_num_q(0),
    _processing_is_mt(false),
    _next_id(0)
  { }

  // Default parameters give you a vanilla reference processor.
  ReferenceProcessor(MemRegion span,
                     bool mt_processing = false, int mt_processing_degree = 1,
                     bool mt_discovery  = false, int mt_discovery_degree  = 1,
                     bool atomic_discovery = true,
                     BoolObjectClosure* is_alive_non_header = NULL,
                     bool discovered_list_needs_barrier = false);

  // RefDiscoveryPolicy values
  enum DiscoveryPolicy {
    ReferenceBasedDiscovery = 0,
    ReferentBasedDiscovery  = 1,
    DiscoveryPolicyMin      = ReferenceBasedDiscovery,
    DiscoveryPolicyMax      = ReferentBasedDiscovery
  };
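
  // (Orientation note, summarizing the RefDiscoveryPolicy check as it is
  // applied in discover_reference() in referenceProcessor.cpp: under
  // ReferenceBasedDiscovery a Reference is discovered only if the Reference
  // object itself lies in _span; under ReferentBasedDiscovery a Reference
  // whose referent lies in _span also qualifies.)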

  static void init_statics();

 public:
  // get and set "is_alive_non_header" field
  BoolObjectClosure* is_alive_non_header() {
    return _is_alive_non_header;
  }
  void set_is_alive_non_header(BoolObjectClosure* is_alive_non_header) {
    _is_alive_non_header = is_alive_non_header;
  }

// ...

// A utility class to temporarily mutate the span of the
// given ReferenceProcessor in the scope that contains it.
class ReferenceProcessorSpanMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  MemRegion           _saved_span;

 public:
  ReferenceProcessorSpanMutator(ReferenceProcessor* rp,
                                MemRegion span):
    _rp(rp) {
    _saved_span = _rp->span();
    _rp->set_span(span);
  }

  ~ReferenceProcessorSpanMutator() {
    _rp->set_span(_saved_span);
  }
};
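
// Usage sketch (illustrative; rp and young_gen_span are hypothetical
// stand-ins): the saved span is restored when the mutator goes out of
// scope, even on an early return or exception path.
//
//   {
//     ReferenceProcessorSpanMutator sm(rp, young_gen_span);
//     // ... discovery is now confined to young_gen_span ...
//   }   // destructor reinstates the previous span here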

// A utility class to temporarily change the MT'ness of
// reference discovery for the given ReferenceProcessor
// in the scope that contains it.
class ReferenceProcessorMTDiscoveryMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  bool                _saved_mt;

 public:
  ReferenceProcessorMTDiscoveryMutator(ReferenceProcessor* rp,
                                       bool mt):
    _rp(rp) {
    _saved_mt = _rp->discovery_is_mt();
    _rp->set_mt_discovery(mt);
  }

  ~ReferenceProcessorMTDiscoveryMutator() {
    _rp->set_mt_discovery(_saved_mt);
  }
};
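
// Like ReferenceProcessorSpanMutator above, this is the RAII save/restore
// idiom: the constructor records the current setting and the destructor
// puts it back, so a temporary change cannot leak out of the enclosing
// scope.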

// A utility class to temporarily change the disposition
// of the "is_alive_non_header" closure field of the
// given ReferenceProcessor in the scope that contains it.
class ReferenceProcessorIsAliveMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  BoolObjectClosure*  _saved_cl;

 public:
  ReferenceProcessorIsAliveMutator(ReferenceProcessor* rp,
                                   BoolObjectClosure*  cl):
    _rp(rp) {
    _saved_cl = _rp->is_alive_non_header();
    _rp->set_is_alive_non_header(cl);
  }

  ~ReferenceProcessorIsAliveMutator() {
    _rp->set_is_alive_non_header(_saved_cl);
  }
};