src/share/vm/memory/referenceProcessor.hpp

Print this page
rev 5823 : 8031703: Missing post-barrier in ReferenceProcessor


  82   // Set value depending on UseCompressedOops. This could be a template class
  83   // but then we have to fix all the instantiations and declarations that use this class.
  84   oop       _oop_head;
  85   narrowOop _compressed_head;
  86   size_t _len;
  87 };
  88 
  89 // Iterator for the list of discovered references.
  90 class DiscoveredListIterator {
  91 private:
  92   DiscoveredList&    _refs_list;
  93   HeapWord*          _prev_next;
  94   oop                _prev;
  95   oop                _ref;
  96   HeapWord*          _discovered_addr;
  97   oop                _next;
  98   HeapWord*          _referent_addr;
  99   oop                _referent;
 100   OopClosure*        _keep_alive;
 101   BoolObjectClosure* _is_alive;

 102 
 103   DEBUG_ONLY(
 104   oop                _first_seen; // cyclic linked list check
 105   )
 106 
 107   NOT_PRODUCT(
 108   size_t             _processed;
 109   size_t             _removed;
 110   )
 111 
 112 public:
 113   inline DiscoveredListIterator(DiscoveredList&    refs_list,
 114                                 OopClosure*        keep_alive,
 115                                 BoolObjectClosure* is_alive):

 116     _refs_list(refs_list),
 117     _prev_next(refs_list.adr_head()),
 118     _prev(NULL),
 119     _ref(refs_list.head()),
 120 #ifdef ASSERT
 121     _first_seen(refs_list.head()),
 122 #endif
 123 #ifndef PRODUCT
 124     _processed(0),
 125     _removed(0),
 126 #endif
 127     _next(NULL),
 128     _keep_alive(keep_alive),
 129     _is_alive(is_alive)

 130 { }
 131 
 132   // End Of List.
 133   inline bool has_next() const { return _ref != NULL; }
 134 
 135   // Get oop to the Reference object.
 136   inline oop obj() const { return _ref; }
 137 
 138   // Get oop to the referent object.
 139   inline oop referent() const { return _referent; }
 140 
 141   // Returns true if referent is alive.
 142   inline bool is_referent_alive() const {
 143     return _is_alive->do_object_b(_referent);
 144   }
 145 
 146   // Loads data for the current reference.
 147   // The "allow_null_referent" argument tells us to allow for the possibility
 148   // of a NULL referent in the discovered Reference object. This typically
 149   // happens in the case of concurrent collectors that may have done the


 211 
 212  private:
 213   size_t total_count(DiscoveredList lists[]);
 214 
 215  protected:
 216   // Compatibility with pre-4965777 JDK's
 217   static bool _pending_list_uses_discovered_field;
 218 
 219   // The SoftReference master timestamp clock
 220   static jlong _soft_ref_timestamp_clock;
 221 
 222   MemRegion   _span;                    // (right-open) interval of heap
 223                                         // subject to wkref discovery
 224 
 225   bool        _discovering_refs;        // true when discovery enabled
 226   bool        _discovery_is_atomic;     // if discovery is atomic wrt
 227                                         // other collectors in configuration
 228   bool        _discovery_is_mt;         // true if reference discovery is MT.
 229 
 230   // If true, setting "next" field of a discovered refs list requires
 231   // write barrier(s).  (Must be true if used in a collector in which
 232   // elements of a discovered list may be moved during discovery: for
 233   // example, a collector like Garbage-First that moves objects during a
 234   // long-term concurrent marking phase that does weak reference
 235   // discovery.)
 236   bool        _discovered_list_needs_barrier;
 237 
 238   bool        _enqueuing_is_done;       // true if all weak references enqueued
 239   bool        _processing_is_mt;        // true during phases when
 240                                         // reference processing is MT.
 241   uint        _next_id;                 // round-robin mod _num_q counter in
 242                                         // support of work distribution
 243 
 244   // For collectors that do not keep GC liveness information
 245   // in the object header, this field holds a closure that
 246   // helps the reference processor determine the reachability
 247   // of an oop. It is currently initialized to NULL for all
 248   // collectors except for CMS and G1.
 249   BoolObjectClosure* _is_alive_non_header;
 250 
 251   // Soft ref clearing policies
 252   // . the default policy
 253   static ReferencePolicy*   _default_soft_ref_policy;
 254   // . the "clear all" policy
 255   static ReferencePolicy*   _always_clear_soft_ref_policy;
 256   // . the current policy below is either one of the above


 363   // either a null referent or are not active. Such
 364   // Reference objects can result from the clearing
 365   // or enqueueing of Reference objects concurrent
 366   // with their discovery by a (concurrent) collector.
 367   // For a definition of "active" see java.lang.ref.Reference;
 368   // Refs are born active, become inactive when enqueued,
 369   // and never become active again. The state of being
 370   // active is encoded as follows: A Ref is active
 371   // if and only if its "next" field is NULL.
 372   void clean_up_discovered_references();
 373   void clean_up_discovered_reflist(DiscoveredList& refs_list);
 374 
 375   // Returns the name of the discovered reference list
 376   // occupying the i / _num_q slot.
 377   const char* list_name(uint i);
 378 
 379   void enqueue_discovered_reflists(HeapWord* pending_list_addr, AbstractRefProcTaskExecutor* task_executor);
 380 
 381  protected:
 382   // Set the 'discovered' field of the given reference to
 383   // the given value - emitting barriers depending upon
 384   // the value of _discovered_list_needs_barrier.
 385   void set_discovered(oop ref, oop value);
 386 
 387   // "Preclean" the given discovered reference list
 388   // by removing references with strongly reachable referents.
 389   // Currently used in support of CMS only.
 390   void preclean_discovered_reflist(DiscoveredList&    refs_list,
 391                                    BoolObjectClosure* is_alive,
 392                                    OopClosure*        keep_alive,
 393                                    VoidClosure*       complete_gc,
 394                                    YieldClosure*      yield);
 395 
 396   // round-robin mod _num_q (note: _not_ mod _max_num_q)
 397   uint next_id() {
 398     uint id = _next_id;           // hand out the current slot
 399     if (++_next_id == _num_q) {   // advance, wrapping at _num_q
 400       _next_id = 0;
 401     }
 402     return id;
 403   }
 404   DiscoveredList* get_discovered_list(ReferenceType rt);


 408 
 409   void clear_discovered_references(DiscoveredList& refs_list);
 410   void abandon_partial_discovered_list(DiscoveredList& refs_list);
 411 
 412   // Calculate the number of jni handles.
 413   unsigned int count_jni_refs();
 414 
 415   // Balances reference queues.
 416   void balance_queues(DiscoveredList ref_lists[]);
 417 
 418   // Update (advance) the soft ref master clock field.
 419   void update_soft_ref_master_clock();
 420 
 421  public:
 422   // Default parameters give you a vanilla reference processor.
 423   ReferenceProcessor(MemRegion span,
 424                      bool mt_processing = false, uint mt_processing_degree = 1,
 425                      bool mt_discovery  = false, uint mt_discovery_degree  = 1,
 426                      bool atomic_discovery = true,
 427                      BoolObjectClosure* is_alive_non_header = NULL,
 428                      bool discovered_list_needs_barrier = false);
 429 
 430   // RefDiscoveryPolicy values
 431   enum DiscoveryPolicy {
 432     ReferenceBasedDiscovery = 0,
 433     ReferentBasedDiscovery  = 1,
 434     DiscoveryPolicyMin      = ReferenceBasedDiscovery,
 435     DiscoveryPolicyMax      = ReferentBasedDiscovery
 436   };
 437 
 438   static void init_statics();
 439 
 440  public:
 441   // get and set "is_alive_non_header" field
 442   BoolObjectClosure* is_alive_non_header() {
 443     return _is_alive_non_header;  // NULL for all collectors except CMS and G1
 444   }
 445   void set_is_alive_non_header(BoolObjectClosure* is_alive_non_header) {
 446     _is_alive_non_header = is_alive_non_header;  // install liveness closure (CMS/G1)
 447   }
 448 




  82   // Set value depending on UseCompressedOops. This could be a template class
  83   // but then we have to fix all the instantiations and declarations that use this class.
  84   oop       _oop_head;
  85   narrowOop _compressed_head;
  86   size_t _len;
  87 };
  88 
  89 // Iterator for the list of discovered references.
  90 class DiscoveredListIterator {
  91 private:
  92   DiscoveredList&    _refs_list;
  93   HeapWord*          _prev_next;
  94   oop                _prev;
  95   oop                _ref;
  96   HeapWord*          _discovered_addr;
  97   oop                _next;
  98   HeapWord*          _referent_addr;
  99   oop                _referent;
 100   OopClosure*        _keep_alive;
 101   BoolObjectClosure* _is_alive;
 102   bool               _discovered_list_needs_post_barrier;
 103 
 104   DEBUG_ONLY(
 105   oop                _first_seen; // cyclic linked list check
 106   )
 107 
 108   NOT_PRODUCT(
 109   size_t             _processed;
 110   size_t             _removed;
 111   )
 112 
 113 public:
 114   inline DiscoveredListIterator(DiscoveredList&    refs_list,
 115                                 OopClosure*        keep_alive,
 116                                 BoolObjectClosure* is_alive,
 117                                 bool               discovered_list_needs_post_barrier = false):
 118     _refs_list(refs_list),
 119     _prev_next(refs_list.adr_head()),
 120     _prev(NULL),
 121     _ref(refs_list.head()),
 122 #ifdef ASSERT
 123     _first_seen(refs_list.head()),
 124 #endif
 125 #ifndef PRODUCT
 126     _processed(0),
 127     _removed(0),
 128 #endif
 129     _next(NULL),
 130     _keep_alive(keep_alive),
 131     _is_alive(is_alive),
 132     _discovered_list_needs_post_barrier(discovered_list_needs_post_barrier)
 133 { }
 134 
 135   // End Of List.
 136   inline bool has_next() const { return _ref != NULL; }
 137 
 138   // Get oop to the Reference object.
 139   inline oop obj() const { return _ref; }
 140 
 141   // Get oop to the referent object.
 142   inline oop referent() const { return _referent; }
 143 
 144   // Returns true if referent is alive.
 145   inline bool is_referent_alive() const {
 146     return _is_alive->do_object_b(_referent);
 147   }
 148 
 149   // Loads data for the current reference.
 150   // The "allow_null_referent" argument tells us to allow for the possibility
 151   // of a NULL referent in the discovered Reference object. This typically
 152   // happens in the case of concurrent collectors that may have done the


 214 
 215  private:
 216   size_t total_count(DiscoveredList lists[]);
 217 
 218  protected:
 219   // Compatibility with pre-4965777 JDK's
 220   static bool _pending_list_uses_discovered_field;
 221 
 222   // The SoftReference master timestamp clock
 223   static jlong _soft_ref_timestamp_clock;
 224 
 225   MemRegion   _span;                    // (right-open) interval of heap
 226                                         // subject to wkref discovery
 227 
 228   bool        _discovering_refs;        // true when discovery enabled
 229   bool        _discovery_is_atomic;     // if discovery is atomic wrt
 230                                         // other collectors in configuration
 231   bool        _discovery_is_mt;         // true if reference discovery is MT.
 232 
 233   // If true, setting "next" field of a discovered refs list requires
 234   // write post barrier.  (Must be true if used in a collector in which
 235   // elements of a discovered list may be moved during discovery: for
 236   // example, a collector like Garbage-First that moves objects during a
 237   // long-term concurrent marking phase that does weak reference
 238   // discovery.)
 239   bool        _discovered_list_needs_post_barrier;
 240 
 241   bool        _enqueuing_is_done;       // true if all weak references enqueued
 242   bool        _processing_is_mt;        // true during phases when
 243                                         // reference processing is MT.
 244   uint        _next_id;                 // round-robin mod _num_q counter in
 245                                         // support of work distribution
 246 
 247   // For collectors that do not keep GC liveness information
 248   // in the object header, this field holds a closure that
 249   // helps the reference processor determine the reachability
 250   // of an oop. It is currently initialized to NULL for all
 251   // collectors except for CMS and G1.
 252   BoolObjectClosure* _is_alive_non_header;
 253 
 254   // Soft ref clearing policies
 255   // . the default policy
 256   static ReferencePolicy*   _default_soft_ref_policy;
 257   // . the "clear all" policy
 258   static ReferencePolicy*   _always_clear_soft_ref_policy;
 259   // . the current policy below is either one of the above


 366   // either a null referent or are not active. Such
 367   // Reference objects can result from the clearing
 368   // or enqueueing of Reference objects concurrent
 369   // with their discovery by a (concurrent) collector.
 370   // For a definition of "active" see java.lang.ref.Reference;
 371   // Refs are born active, become inactive when enqueued,
 372   // and never become active again. The state of being
 373   // active is encoded as follows: A Ref is active
 374   // if and only if its "next" field is NULL.
 375   void clean_up_discovered_references();
 376   void clean_up_discovered_reflist(DiscoveredList& refs_list);
 377 
 378   // Returns the name of the discovered reference list
 379   // occupying the i / _num_q slot.
 380   const char* list_name(uint i);
 381 
 382   void enqueue_discovered_reflists(HeapWord* pending_list_addr, AbstractRefProcTaskExecutor* task_executor);
 383 
 384  protected:
 385   // Set the 'discovered' field of the given reference to
 386   // the given value - emitting post barriers depending upon
 387   // the value of _discovered_list_needs_post_barrier.
 388   void set_discovered(oop ref, oop value);
 389 
 390   // "Preclean" the given discovered reference list
 391   // by removing references with strongly reachable referents.
 392   // Currently used in support of CMS only.
 393   void preclean_discovered_reflist(DiscoveredList&    refs_list,
 394                                    BoolObjectClosure* is_alive,
 395                                    OopClosure*        keep_alive,
 396                                    VoidClosure*       complete_gc,
 397                                    YieldClosure*      yield);
 398 
 399   // round-robin mod _num_q (note: _not_ mod _max_num_q)
 400   uint next_id() {
 401     uint id = _next_id;           // hand out the current slot
 402     if (++_next_id == _num_q) {   // advance, wrapping at _num_q
 403       _next_id = 0;
 404     }
 405     return id;
 406   }
 407   DiscoveredList* get_discovered_list(ReferenceType rt);


 411 
 412   void clear_discovered_references(DiscoveredList& refs_list);
 413   void abandon_partial_discovered_list(DiscoveredList& refs_list);
 414 
 415   // Calculate the number of jni handles.
 416   unsigned int count_jni_refs();
 417 
 418   // Balances reference queues.
 419   void balance_queues(DiscoveredList ref_lists[]);
 420 
 421   // Update (advance) the soft ref master clock field.
 422   void update_soft_ref_master_clock();
 423 
 424  public:
 425   // Default parameters give you a vanilla reference processor.
 426   ReferenceProcessor(MemRegion span,
 427                      bool mt_processing = false, uint mt_processing_degree = 1,
 428                      bool mt_discovery  = false, uint mt_discovery_degree  = 1,
 429                      bool atomic_discovery = true,
 430                      BoolObjectClosure* is_alive_non_header = NULL,
 431                      bool discovered_list_needs_post_barrier = false);
 432 
 433   // RefDiscoveryPolicy values
 434   enum DiscoveryPolicy {
 435     ReferenceBasedDiscovery = 0,
 436     ReferentBasedDiscovery  = 1,
 437     DiscoveryPolicyMin      = ReferenceBasedDiscovery,
 438     DiscoveryPolicyMax      = ReferentBasedDiscovery
 439   };
 440 
 441   static void init_statics();
 442 
 443  public:
 444   // get and set "is_alive_non_header" field
 445   BoolObjectClosure* is_alive_non_header() {
 446     return _is_alive_non_header;  // NULL for all collectors except CMS and G1
 447   }
 448   void set_is_alive_non_header(BoolObjectClosure* is_alive_non_header) {
 449     _is_alive_non_header = is_alive_non_header;  // install liveness closure (CMS/G1)
 450   }
 451