rev 2691 : [mq]: g1-reference-processing

--- old/src/share/vm/memory/referenceProcessor.hpp
+++ new/src/share/vm/memory/referenceProcessor.hpp
[... 40 lines elided ...]
  41   41  // The basic idea is that each ReferenceProcessor object concerns
  42   42  // itself with ("weak") reference processing in a specific "span"
  43   43  // of the heap of interest to a specific collector. Currently,
  44   44  // the span is a convex interval of the heap, but, efficiency
  45   45  // apart, there seems to be no reason it couldn't be extended
  46   46  // (with appropriate modifications) to any "non-convex interval".
  47   47  
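For orientation, discovery is gated on this span: a Reference object outside it is
left to some other collector. A hedged sketch of the kind of test involved (the real
check lives in ReferenceProcessor::discover_reference(); obj_addr is a hypothetical
name for the reference object's address):

    // Sketch only: skip references outside this processor's span.
    if (!_span.contains(obj_addr)) {
      return false;   // not this ReferenceProcessor's responsibility
    }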
  48   48  // forward references
  49   49  class ReferencePolicy;
  50   50  class AbstractRefProcTaskExecutor;
  51      -class DiscoveredList;
       51 +
       52 +// List of discovered references.
       53 +class DiscoveredList {
       54 +public:
       55 +  DiscoveredList() : _len(0), _compressed_head(0), _oop_head(NULL) { }
       56 +  oop head() const     {
        57 +    return UseCompressedOops ? oopDesc::decode_heap_oop(_compressed_head) :
        58 +                               _oop_head;
       59 +  }
       60 +  HeapWord* adr_head() {
       61 +    return UseCompressedOops ? (HeapWord*)&_compressed_head :
       62 +                               (HeapWord*)&_oop_head;
       63 +  }
       64 +  void set_head(oop o) {
       65 +    if (UseCompressedOops) {
       66 +      // Must compress the head ptr.
       67 +      _compressed_head = oopDesc::encode_heap_oop(o);
       68 +    } else {
       69 +      _oop_head = o;
       70 +    }
       71 +  }
       72 +  bool   is_empty() const       { return head() == NULL; }
       73 +  size_t length()               { return _len; }
       74 +  void   set_length(size_t len) { _len = len;  }
       75 +  void   inc_length(size_t inc) { _len += inc; assert(_len > 0, "Error"); }
       76 +  void   dec_length(size_t dec) { _len -= dec; }
       77 +private:
        78 +  // Stores the head compressed or uncompressed, depending on UseCompressedOops.
        79 +  // This could be a template class, but all its instantiations and declarations would then need fixing.
       80 +  oop       _oop_head;
       81 +  narrowOop _compressed_head;
       82 +  size_t _len;
       83 +};
       84 +
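For orientation, a hedged sketch of how a newly discovered reference obj would be
prepended to a DiscoveredList, modeled on the discovery code in referenceProcessor.cpp
(names and details are illustrative):

    oop current_head = list.head();
    // The last Reference on a discovered list points to itself, so an empty
    // list seeds the 'discovered' field with obj rather than NULL.
    oop next_discovered = (current_head != NULL) ? current_head : obj;
    java_lang_ref_Reference::set_discovered(obj, next_discovered);
    list.set_head(obj);
    list.inc_length(1);

The self-loop terminator is what the iterator below relies on when it compares
_ref against _next to detect the end of the list.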
       85 +// Iterator for the list of discovered references.
       86 +class DiscoveredListIterator {
       87 +private:
       88 +  DiscoveredList&    _refs_list;
       89 +  HeapWord*          _prev_next;
       90 +  oop                _prev;
       91 +  oop                _ref;
       92 +  HeapWord*          _discovered_addr;
       93 +  oop                _next;
       94 +  HeapWord*          _referent_addr;
       95 +  oop                _referent;
       96 +  OopClosure*        _keep_alive;
       97 +  BoolObjectClosure* _is_alive;
       98 +
       99 +  DEBUG_ONLY(
      100 +  oop                _first_seen; // cyclic linked list check
      101 +  )
      102 +
      103 +  NOT_PRODUCT(
      104 +  size_t             _processed;
      105 +  size_t             _removed;
      106 +  )
      107 +
      108 +public:
      109 +  inline DiscoveredListIterator(DiscoveredList&    refs_list,
      110 +                                OopClosure*        keep_alive,
      111 +                                BoolObjectClosure* is_alive):
      112 +    _refs_list(refs_list),
      113 +    _prev_next(refs_list.adr_head()),
      114 +    _prev(NULL),
      115 +    _ref(refs_list.head()),
      116 +#ifdef ASSERT
      117 +    _first_seen(refs_list.head()),
      118 +#endif
      119 +#ifndef PRODUCT
      120 +    _processed(0),
      121 +    _removed(0),
      122 +#endif
      123 +    _next(NULL),
      124 +    _keep_alive(keep_alive),
      125 +    _is_alive(is_alive)
      126 +{ }
      127 +
       128 +  // True while the iterator has not yet reached the end of the list.
      129 +  inline bool has_next() const { return _ref != NULL; }
      130 +
      131 +  // Get oop to the Reference object.
      132 +  inline oop obj() const { return _ref; }
      133 +
      134 +  // Get oop to the referent object.
      135 +  inline oop referent() const { return _referent; }
      136 +
      137 +  // Returns true if referent is alive.
      138 +  inline bool is_referent_alive() const {
      139 +    return _is_alive->do_object_b(_referent);
      140 +  }
      141 +
      142 +  // Loads data for the current reference.
      143 +  // The "allow_null_referent" argument tells us to allow for the possibility
      144 +  // of a NULL referent in the discovered Reference object. This typically
      145 +  // happens in the case of concurrent collectors that may have done the
      146 +  // discovery concurrently, or interleaved, with mutator execution.
      147 +  void load_ptrs(DEBUG_ONLY(bool allow_null_referent));
      148 +
      149 +  // Move to the next discovered reference.
      150 +  inline void next() {
      151 +    _prev_next = _discovered_addr;
      152 +    _prev = _ref;
      153 +    move_to_next();
      154 +  }
      155 +
       156 +  // Remove the current reference from the list.
      157 +  void remove();
      158 +
      159 +  // Make the Reference object active again.
      160 +  void make_active();
      161 +
      162 +  // Make the referent alive.
      163 +  inline void make_referent_alive() {
      164 +    if (UseCompressedOops) {
      165 +      _keep_alive->do_oop((narrowOop*)_referent_addr);
      166 +    } else {
      167 +      _keep_alive->do_oop((oop*)_referent_addr);
      168 +    }
      169 +  }
      170 +
      171 +  // Update the discovered field.
      172 +  inline void update_discovered() {
      173 +    // First _prev_next ref actually points into DiscoveredList (gross).
      174 +    if (UseCompressedOops) {
      175 +      if (!oopDesc::is_null(*(narrowOop*)_prev_next)) {
      176 +        _keep_alive->do_oop((narrowOop*)_prev_next);
      177 +      }
      178 +    } else {
      179 +      if (!oopDesc::is_null(*(oop*)_prev_next)) {
      180 +        _keep_alive->do_oop((oop*)_prev_next);
      181 +      }
      182 +    }
      183 +  }
      184 +
      185 +  // NULL out referent pointer.
      186 +  void clear_referent();
      187 +
      188 +  // Statistics
      189 +  NOT_PRODUCT(
      190 +  inline size_t processed() const { return _processed; }
      191 +  inline size_t removed() const   { return _removed; }
      192 +  )
      193 +
      194 +  inline void move_to_next() {
      195 +    if (_ref == _next) {
      196 +      // End of the list.
      197 +      _ref = NULL;
      198 +    } else {
      199 +      _ref = _next;
      200 +    }
      201 +    assert(_ref != _first_seen, "cyclic ref_list found");
      202 +    NOT_PRODUCT(_processed++);
      203 +  }
      204 +
      205 +};
  52  206  
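Putting the iterator together, a hedged sketch of the canonical traversal, modeled
on the phase-2 processing loop in referenceProcessor.cpp (the load_ptrs argument and
the exact call order are illustrative):

    DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
    while (iter.has_next()) {
      iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
      if (iter.is_referent_alive()) {
        // The referent is strongly reachable: unlink the Reference, make
        // it active again, keep the referent alive, and step past it.
        iter.remove();
        iter.make_active();
        iter.make_referent_alive();
        iter.move_to_next();
      } else {
        iter.next();
      }
    }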
  53  207  class ReferenceProcessor : public CHeapObj {
  54  208   protected:
   55  209    // Compatibility with pre-4965777 JDKs
  56  210    static bool _pending_list_uses_discovered_field;
  57      -  MemRegion   _span; // (right-open) interval of heap
  58      -                     // subject to wkref discovery
  59      -  bool        _discovering_refs;      // true when discovery enabled
  60      -  bool        _discovery_is_atomic;   // if discovery is atomic wrt
  61      -                                      // other collectors in configuration
  62      -  bool        _discovery_is_mt;       // true if reference discovery is MT.
      211 +
      212 +  MemRegion   _span;                    // (right-open) interval of heap
      213 +                                        // subject to wkref discovery
      214 +
      215 +  bool        _discovering_refs;        // true when discovery enabled
      216 +  bool        _discovery_is_atomic;     // if discovery is atomic wrt
      217 +                                        // other collectors in configuration
      218 +  bool        _discovery_is_mt;         // true if reference discovery is MT.
      219 +
  63  220    // If true, setting "next" field of a discovered refs list requires
  64  221    // write barrier(s).  (Must be true if used in a collector in which
  65  222    // elements of a discovered list may be moved during discovery: for
  66  223    // example, a collector like Garbage-First that moves objects during a
  67  224    // long-term concurrent marking phase that does weak reference
  68  225    // discovery.)
  69  226    bool        _discovered_list_needs_barrier;
  70      -  BarrierSet* _bs;                    // Cached copy of BarrierSet.
  71      -  bool        _enqueuing_is_done;     // true if all weak references enqueued
  72      -  bool        _processing_is_mt;      // true during phases when
  73      -                                      // reference processing is MT.
  74      -  int         _next_id;               // round-robin mod _num_q counter in
  75      -                                      // support of work distribution
  76  227  
  77      -  // For collectors that do not keep GC marking information
      228 +  BarrierSet* _bs;                      // Cached copy of BarrierSet.
      229 +  bool        _enqueuing_is_done;       // true if all weak references enqueued
      230 +  bool        _processing_is_mt;        // true during phases when
      231 +                                        // reference processing is MT.
      232 +  int         _next_id;                 // round-robin mod _num_q counter in
      233 +                                        // support of work distribution
      234 +
      235 +  // For collectors that do not keep GC liveness information
  78  236    // in the object header, this field holds a closure that
  79  237    // helps the reference processor determine the reachability
  80      -  // of an oop (the field is currently initialized to NULL for
  81      -  // all collectors but the CMS collector).
      238 +  // of an oop. It is currently initialized to NULL for all
      239 +  // collectors except for CMS and G1.
  82  240    BoolObjectClosure* _is_alive_non_header;
  83  241  
  84  242    // Soft ref clearing policies
  85  243    // . the default policy
  86  244    static ReferencePolicy*   _default_soft_ref_policy;
  87  245    // . the "clear all" policy
  88  246    static ReferencePolicy*   _always_clear_soft_ref_policy;
  89  247    // . the current policy below is either one of the above
  90  248    ReferencePolicy*          _current_soft_ref_policy;
  91  249  
[... 3 lines elided ...]
  95  253    int             _num_q;
  96  254    // The maximum MT'ness degree of the queues below
  97  255    int             _max_num_q;
  98  256    // Arrays of lists of oops, one per thread
  99  257    DiscoveredList* _discoveredSoftRefs;
 100  258    DiscoveredList* _discoveredWeakRefs;
 101  259    DiscoveredList* _discoveredFinalRefs;
 102  260    DiscoveredList* _discoveredPhantomRefs;
 103  261  
 104  262   public:
 105      -  int num_q()                            { return _num_q; }
 106      -  int max_num_q()                        { return _max_num_q; }
 107      -  void set_active_mt_degree(int v)       { _num_q = v; }
 108      -  DiscoveredList* discovered_soft_refs() { return _discoveredSoftRefs; }
      263 +  static int number_of_subclasses_of_ref() { return (REF_PHANTOM - REF_OTHER); }
      264 +
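For context, number_of_subclasses_of_ref() counts the concrete java.lang.ref.Reference
subtypes (soft, weak, final, phantom). The four _discovered*Refs arrays below are
allocated as one contiguous block in referenceProcessor.cpp, which permits walking
every list as a single flat array; a hedged sketch of that pattern:

    // Sketch: visit every discovered list across all types and queues.
    for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
      DiscoveredList& list = _discoveredSoftRefs[i];  // arrays are contiguous
      // ... process list ...
    }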
      265 +  int num_q()                              { return _num_q; }
      266 +  int max_num_q()                          { return _max_num_q; }
      267 +  void set_active_mt_degree(int v)         { _num_q = v; }
      268 +  DiscoveredList* discovered_soft_refs()   { return _discoveredSoftRefs; }
      269 +
 109  270    ReferencePolicy* setup_policy(bool always_clear) {
 110  271      _current_soft_ref_policy = always_clear ?
 111  272        _always_clear_soft_ref_policy : _default_soft_ref_policy;
 112  273      _current_soft_ref_policy->setup();   // snapshot the policy threshold
 113  274      return _current_soft_ref_policy;
 114  275    }
 115  276  
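For context, an illustrative use of setup_policy() at the start of a cycle
(rp and clear_all_soft_refs are hypothetical names):

    // clear_all_soft_refs is true for, e.g., a last-ditch full collection.
    ReferencePolicy* soft_ref_policy = rp->setup_policy(clear_all_soft_refs);
    // soft_ref_policy is then applied while processing the SoftReference lists.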
 116  277    // Process references with a certain reachability level.
 117  278    void process_discovered_reflist(DiscoveredList               refs_lists[],
 118  279                                    ReferencePolicy*             policy,
[... 79 lines elided ...]
 198  359    void clean_up_discovered_references();
 199  360    void clean_up_discovered_reflist(DiscoveredList& refs_list);
 200  361  
 201  362    // Returns the name of the discovered reference list
 202  363    // occupying the i / _num_q slot.
 203  364    const char* list_name(int i);
 204  365  
 205  366    void enqueue_discovered_reflists(HeapWord* pending_list_addr, AbstractRefProcTaskExecutor* task_executor);
 206  367  
 207  368   protected:
      369 +  // Set the 'discovered' field of the given reference to
      370 +  // the given value - emitting barriers depending upon
      371 +  // the value of _discovered_list_needs_barrier.
      372 +  void set_discovered(oop ref, oop value);
      373 +
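The definition of set_discovered() lives in referenceProcessor.cpp, outside this
file; roughly, and hedged (the exact barrier call may differ):

    void ReferenceProcessor::set_discovered(oop ref, oop value) {
      java_lang_ref_Reference::set_discovered(ref, value);
      if (_discovered_list_needs_barrier) {
        // Tell the barrier set about the store, e.g. so G1 can track a
        // discovered-list entry whose elements may move during marking.
        _bs->write_ref_field(java_lang_ref_Reference::discovered_addr(ref), value);
      }
    }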
 208  374    // "Preclean" the given discovered reference list
 209  375    // by removing references with strongly reachable referents.
 210  376    // Currently used in support of CMS only.
 211  377    void preclean_discovered_reflist(DiscoveredList&    refs_list,
 212  378                                     BoolObjectClosure* is_alive,
 213  379                                     OopClosure*        keep_alive,
 214  380                                     VoidClosure*       complete_gc,
 215  381                                     YieldClosure*      yield);
 216  382  
  217  383    // round-robin mod _num_q (note: _not_ mod _max_num_q)
[... 65 lines elided ...]
 283  449    }
 284  450    void set_is_alive_non_header(BoolObjectClosure* is_alive_non_header) {
 285  451      _is_alive_non_header = is_alive_non_header;
 286  452    }
 287  453  
 288  454    // get and set span
 289  455    MemRegion span()                   { return _span; }
 290  456    void      set_span(MemRegion span) { _span = span; }
 291  457  
 292  458    // start and stop weak ref discovery
 293      -  void enable_discovery()   { _discovering_refs = true;  }
      459 +  void enable_discovery(bool verify_disabled, bool check_no_refs) {
      460 +#ifdef ASSERT
      461 +    // Verify that we're not currently discovering refs
      462 +    assert(!verify_disabled || !_discovering_refs, "nested call?");
      463 +
      464 +    if (check_no_refs) {
      465 +      // Verify that the discovered lists are empty
      466 +      verify_no_references_recorded();
      467 +    }
      468 +#endif // ASSERT
      469 +    _discovering_refs = true;
      470 +  }
      471 +
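For context, an illustrative call at the start of a collection (the flag values
are examples, not prescriptive):

    // Discovery must currently be off, and the discovered lists empty.
    ref_processor()->enable_discovery(true /*verify_disabled*/, true /*check_no_refs*/);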
 294  472    void disable_discovery()  { _discovering_refs = false; }
 295  473    bool discovery_enabled()  { return _discovering_refs;  }
 296  474  
 297  475    // whether discovery is atomic wrt other collectors
 298  476    bool discovery_is_atomic() const { return _discovery_is_atomic; }
 299  477    void set_atomic_discovery(bool atomic) { _discovery_is_atomic = atomic; }
 300  478  
 301  479    // whether the JDK in which we are embedded is a pre-4965777 JDK,
 302  480    // and thus whether or not it uses the discovered field to chain
 303  481    // the entries in the pending list.
[... 54 lines elided ...]
 358  536   public:
 359  537    NoRefDiscovery(ReferenceProcessor* rp) : _rp(rp) {
 360  538      _was_discovering_refs = _rp->discovery_enabled();
 361  539      if (_was_discovering_refs) {
 362  540        _rp->disable_discovery();
 363  541      }
 364  542    }
 365  543  
 366  544    ~NoRefDiscovery() {
 367  545      if (_was_discovering_refs) {
 368      -      _rp->enable_discovery();
      546 +      _rp->enable_discovery(true /*verify_disabled*/, false /*check_no_refs*/);
 369  547      }
 370  548    }
 371  549  };
 372  550  
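An illustrative use of the guard above (the scope and ref_processor() are hypothetical):

    {
      NoRefDiscovery no_discovery(ref_processor());
      // ... a phase that must not discover new references ...
    } // destructor re-enables discovery iff it was enabled on entry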
 373  551  
 374  552  // A utility class to temporarily mutate the span of the
 375  553  // given ReferenceProcessor in the scope that contains it.
 376  554  class ReferenceProcessorSpanMutator: StackObj {
 377  555   private:
 378  556    ReferenceProcessor* _rp;
[... 169 lines elided ...]