src/share/vm/memory/referenceProcessor.hpp

rev 2585 : [mq]: g1-reference-processing


  31 // ReferenceProcessor class encapsulates the per-"collector" processing
  32 // of java.lang.Reference objects for GC. The interface is useful for supporting
  33 // a generational abstraction, in particular when there are multiple
  34 // generations that are being independently collected -- possibly
  35 // concurrently and/or incrementally.  Note, however, that the
  36 // ReferenceProcessor class abstracts away from a generational setting
  37 // by using only a heap interval (called "span" below), thus allowing
  38 // its use in a straightforward manner in a general, non-generational
  39 // setting.
  40 //
  41 // The basic idea is that each ReferenceProcessor object concerns
  42 // itself with ("weak") reference processing in a specific "span"
  43 // of the heap of interest to a specific collector. Currently,
  44 // the span is a convex interval of the heap, but, efficiency
  45 // apart, there seems to be no reason it couldn't be extended
  46 // (with appropriate modifications) to any "non-convex interval".
  47 
  48 // forward references
  49 class ReferencePolicy;
  50 class AbstractRefProcTaskExecutor;
  51 class DiscoveredList;
  52 
  53 class ReferenceProcessor : public CHeapObj {
  54  protected:
  55   // End of list marker
  56   static oop  _sentinelRef;

  57   MemRegion   _span; // (right-open) interval of heap
  58                      // subject to wkref discovery

  59   bool        _discovering_refs;      // true when discovery enabled
  60   bool        _discovery_is_atomic;   // if discovery is atomic wrt
  61                                       // other collectors in configuration
  62   bool        _discovery_is_mt;       // true if reference discovery is MT.

  63   // If true, setting "next" field of a discovered refs list requires
  64   // write barrier(s).  (Must be true if used in a collector in which
  65   // elements of a discovered list may be moved during discovery: for
  66   // example, a collector like Garbage-First that moves objects during a
  67   // long-term concurrent marking phase that does weak reference
  68   // discovery.)
  69   bool        _discovered_list_needs_barrier;

  70   BarrierSet* _bs;                    // Cached copy of BarrierSet.
  71   bool        _enqueuing_is_done;     // true if all weak references enqueued
  72   bool        _processing_is_mt;      // true during phases when
  73                                       // reference processing is MT.
  74   int         _next_id;               // round-robin mod _num_q counter in
  75                                       // support of work distribution
  76 
  77   // For collectors that do not keep GC marking information
  78   // in the object header, this field holds a closure that
  79   // helps the reference processor determine the reachability
  80   // of an oop (the field is currently initialized to NULL for
  81   // all collectors but the CMS collector).
  82   BoolObjectClosure* _is_alive_non_header;
  83 
  84   // Soft ref clearing policies
  85   // . the default policy
  86   static ReferencePolicy*   _default_soft_ref_policy;
  87   // . the "clear all" policy
  88   static ReferencePolicy*   _always_clear_soft_ref_policy;
  89   // . the current policy below is either one of the above
  90   ReferencePolicy*          _current_soft_ref_policy;
  91 
  92   // The discovered ref lists themselves
  93 
  94   // The active MT'ness degree of the queues below
  95   int             _num_q;
  96   // The maximum MT'ness degree of the queues below
  97   int             _max_num_q;
  98   // Arrays of lists of oops, one per thread
  99   DiscoveredList* _discoveredSoftRefs;
 100   DiscoveredList* _discoveredWeakRefs;
 101   DiscoveredList* _discoveredFinalRefs;
 102   DiscoveredList* _discoveredPhantomRefs;
 103 
 104  public:


 105   int num_q()                            { return _num_q; }
 106   int max_num_q()                        { return _max_num_q; }
 107   void set_active_mt_degree(int v)       { _num_q = v; }
 108   DiscoveredList* discovered_soft_refs() { return _discoveredSoftRefs; }
 109   static oop  sentinel_ref()             { return _sentinelRef; }
 110   static oop* adr_sentinel_ref()         { return &_sentinelRef; }
 111   ReferencePolicy* setup_policy(bool always_clear) {
 112     _current_soft_ref_policy = always_clear ?
 113       _always_clear_soft_ref_policy : _default_soft_ref_policy;
 114     _current_soft_ref_policy->setup();   // snapshot the policy threshold
 115     return _current_soft_ref_policy;
 116   }
 117 
 118  public:
 119   // Process references with a certain reachability level.
 120   void process_discovered_reflist(DiscoveredList               refs_lists[],
 121                                   ReferencePolicy*             policy,
 122                                   bool                         clear_referent,
 123                                   BoolObjectClosure*           is_alive,
 124                                   OopClosure*                  keep_alive,




  31 // ReferenceProcessor class encapsulates the per-"collector" processing
  32 // of java.lang.Reference objects for GC. The interface is useful for supporting
  33 // a generational abstraction, in particular when there are multiple
  34 // generations that are being independently collected -- possibly
  35 // concurrently and/or incrementally.  Note, however, that the
  36 // ReferenceProcessor class abstracts away from a generational setting
  37 // by using only a heap interval (called "span" below), thus allowing
  38 // its use in a straightforward manner in a general, non-generational
  39 // setting.
  40 //
  41 // The basic idea is that each ReferenceProcessor object concerns
  42 // itself with ("weak") reference processing in a specific "span"
  43 // of the heap of interest to a specific collector. Currently,
  44 // the span is a convex interval of the heap, but, efficiency
  45 // apart, there seems to be no reason it couldn't be extended
  46 // (with appropriate modifications) to any "non-convex interval".
  47 
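As a rough illustration of the span-gated discovery described above (a sketch and an assumption about the discovery path, not text from this change), the check a collector applies before discovering a Reference amounts to a containment test against its span:

    // Sketch only: the helper name is hypothetical. Would this processor's
    // span allow discovery of the given Reference object?
    static bool span_allows_discovery(MemRegion span, oop obj) {
      // References outside the span belong to some other collector's
      // ReferenceProcessor and are left undiscovered here.
      return span.contains((HeapWord*)obj);
    }
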
  48 // forward references
  49 class ReferencePolicy;
  50 class AbstractRefProcTaskExecutor;
  51 
  52 // List of discovered references.
  53 class DiscoveredList {
  54  public:
  55   DiscoveredList() : _len(0), _compressed_head(0), _oop_head(NULL) { }
  56   oop head() const     {
  57      return UseCompressedOops ?  oopDesc::decode_heap_oop_not_null(_compressed_head) :
  58                                 _oop_head;
  59   }
  60   HeapWord* adr_head() {
  61     return UseCompressedOops ? (HeapWord*)&_compressed_head :
  62                                (HeapWord*)&_oop_head;
  63   }
  64   void   set_head(oop o) {
  65     if (UseCompressedOops) {
  66       // Must compress the head ptr.
  67       _compressed_head = oopDesc::encode_heap_oop_not_null(o);
  68     } else {
  69       _oop_head = o;
  70     }
  71   }
  72 
  73   bool is_empty() const; 
  74 
  75   size_t length()               { return _len; }
  76   void   set_length(size_t len) { _len = len;  }
  77   void   inc_length(size_t inc) { _len += inc; assert(_len > 0, "Error"); }
  78   void   dec_length(size_t dec) { _len -= dec; }
  79 
  80  private:
  81   // Set value depending on UseCompressedOops. This could be a template class
  82   // but then we have to fix all the instantiations and declarations that use this class.
  83   oop       _oop_head;
  84   narrowOop _compressed_head;
  85   size_t _len;
  86 };
  87 
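A small sketch of how the accessors above combine when a newly discovered Reference is prepended to a list. It is illustration only, not part of this webrev; set_discovered_next() is a hypothetical stand-in for the linking through the Reference object's fields that the real code in referenceProcessor.cpp performs (including its MT/CAS and write-barrier variants):

    // Sketch only: single-threaded prepend onto a DiscoveredList.
    void prepend_discovered(DiscoveredList& refs_list, oop obj) {
      set_discovered_next(obj, refs_list.head());  // hypothetical link step
      refs_list.set_head(obj);                     // stored compressed or uncompressed
      refs_list.inc_length(1);
    }
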
  88 class ReferenceProcessor : public CHeapObj {
  89  protected:
  90   // End of list marker
  91   static oop  _sentinelRef;
  92 
  93   MemRegion   _span;                    // (right-open) interval of heap
  94                                         // subject to wkref discovery
  95 
  96   bool        _discovering_refs;        // true when discovery enabled
  97   bool        _discovery_is_atomic;     // if discovery is atomic wrt
  98                                         // other collectors in configuration
  99   bool        _discovery_is_mt;         // true if reference discovery is MT.
 100 
 101   // If true, setting "next" field of a discovered refs list requires
 102   // write barrier(s).  (Must be true if used in a collector in which
 103   // elements of a discovered list may be moved during discovery: for
 104   // example, a collector like Garbage-First that moves objects during a
 105   // long-term concurrent marking phase that does weak reference
 106   // discovery.)
 107   bool        _discovered_list_needs_barrier;
 108 
 109   BarrierSet* _bs;                      // Cached copy of BarrierSet.
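  // Illustration (assumption, not part of this change): when
  // _discovered_list_needs_barrier is true, the discovery code pairs the
  // store that links a Reference into a discovered list with a barrier
  // through the cached BarrierSet, roughly:
  //   oopDesc::encode_store_heap_oop(discovered_addr, current_head);
  //   if (_discovered_list_needs_barrier) {
  //     _bs->write_ref_field(discovered_addr, current_head);
  //   }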
 110   bool        _enqueuing_is_done;       // true if all weak references enqueued
 111   bool        _processing_is_mt;        // true during phases when
 112                                         // reference processing is MT.
 113   int         _next_id;                 // round-robin mod _num_q counter in
 114                                         // support of work distribution
 115 
 116   // For collectors that do not keep GC liveness information
 117   // in the object header, this field holds a closure that
 118   // helps the reference processor determine the reachability
 119   // of an oop. It is currently initialized to NULL for all
 120   // collectors except for CMS and G1.
 121   BoolObjectClosure* _is_alive_non_header;
 122 
 123   // Soft ref clearing policies
 124   // . the default policy
 125   static ReferencePolicy*   _default_soft_ref_policy;
 126   // . the "clear all" policy
 127   static ReferencePolicy*   _always_clear_soft_ref_policy;
 128   // . the current policy below is either one of the above
 129   ReferencePolicy*          _current_soft_ref_policy;
 130 
 131   // The discovered ref lists themselves
 132 
 133   // The active MT'ness degree of the queues below
 134   int             _num_q;
 135   // The maximum MT'ness degree of the queues below
 136   int             _max_num_q;
 137   // Arrays of lists of oops, one per thread
 138   DiscoveredList* _discoveredSoftRefs;
 139   DiscoveredList* _discoveredWeakRefs;
 140   DiscoveredList* _discoveredFinalRefs;
 141   DiscoveredList* _discoveredPhantomRefs;
 142 
 143  public:
 144   static int subclasses_of_ref()         { return (REF_PHANTOM - REF_OTHER); }
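  // (Assumes the ReferenceType enum ordering REF_OTHER < REF_SOFT < REF_WEAK
  //  < REF_FINAL < REF_PHANTOM, so the difference counts the four
  //  java.lang.ref subclasses that get their own discovered lists above.)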
 145 
 146   int num_q()                            { return _num_q; }
 147   int max_num_q()                        { return _max_num_q; }
 148   void set_active_mt_degree(int v)       { _num_q = v; }
 149   DiscoveredList* discovered_soft_refs() { return _discoveredSoftRefs; }
 150   static oop  sentinel_ref()             { return _sentinelRef; }
 151   static oop* adr_sentinel_ref()         { return &_sentinelRef; }
 152   ReferencePolicy* setup_policy(bool always_clear) {
 153     _current_soft_ref_policy = always_clear ?
 154       _always_clear_soft_ref_policy : _default_soft_ref_policy;
 155     _current_soft_ref_policy->setup();   // snapshot the policy threshold
 156     return _current_soft_ref_policy;
 157   }
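A sketch of the intended calling pattern for the policy accessor above (variable names are assumptions; process_discovered_references() is the public entry point declared further down in this header):

    // Sketch only: pick the soft-reference clearing policy once per GC,
    // then run reference processing with the collector's closures.
    rp->setup_policy(clear_all_soft_refs);
    rp->process_discovered_references(&is_alive, &keep_alive, &complete_gc,
                                      NULL /* no MT task executor */);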
 158 
 159  public:
 160   // Process references with a certain reachability level.
 161   void process_discovered_reflist(DiscoveredList               refs_lists[],
 162                                   ReferencePolicy*             policy,
 163                                   bool                         clear_referent,
 164                                   BoolObjectClosure*           is_alive,
 165                                   OopClosure*                  keep_alive,