
src/hotspot/share/gc/shared/referenceProcessor.hpp

rev 49831 : imported patch 8201492-properly-implement-non-contiguous-reference-processing
rev 49834 : [mq]: 8202021-cleanup-referenceprocessor
rev 49835 : [mq]: 8202021-stefanj-review
rev 49836 : [mq]: 8202017-reference-processor-remove-enqueue


 125 
 126   // Move to the next discovered reference.
 127   inline void next() {
 128     _prev_discovered_addr = _discovered_addr;
 129     _prev_discovered = _current_discovered;
 130     move_to_next();
 131   }
 132 
 133   // Remove the current reference from the list.
 134   void remove();
 135 
 136   // Make the referent alive.
 137   inline void make_referent_alive() {
 138     if (UseCompressedOops) {
 139       _keep_alive->do_oop((narrowOop*)_referent_addr);
 140     } else {
 141       _keep_alive->do_oop((oop*)_referent_addr);
 142     }
 143   }
 144 






 145   // NULL out referent pointer.
 146   void clear_referent();
 147 
 148   // Statistics
 149   NOT_PRODUCT(
 150   inline size_t processed() const { return _processed; }
 151   inline size_t removed() const   { return _removed; }
 152   )
 153 
 154   inline void move_to_next() {
 155     if (_current_discovered == _next_discovered) {
 156       // End of the list.
 157       _current_discovered = NULL;
 158     } else {
 159       _current_discovered = _next_discovered;
 160     }
 161     assert(_current_discovered != _first_seen, "cyclic ref_list found");
 162     NOT_PRODUCT(_processed++);
 163   }
 164 };
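
For orientation, here is a minimal sketch (not part of this webrev) of how a DiscoveredListIterator is typically driven; has_next(), load_ptrs() and referent() are assumed iterator members that this hunk does not show:

    // Sketch: drop references whose referents are already strongly reachable.
    void example_filter(DiscoveredListIterator& iter, BoolObjectClosure* is_alive) {
      while (iter.has_next()) {                                     // assumed helper
        iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */)); // assumed helper
        if (iter.referent() != NULL && is_alive->do_object_b(iter.referent())) {
          iter.remove();               // unlink from the discovered list
          iter.make_referent_alive();  // push the referent to the keep_alive closure
          iter.move_to_next();         // remove() does not advance the cursor
        } else {
          iter.next();                 // keep this ref; record it as prev
        }
      }
    }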


 255                       BoolObjectClosure* is_alive,
 256                       OopClosure*        keep_alive,
 257                       VoidClosure*       complete_gc);
 258   // Work methods in support of process_phase2
 259   void pp2_work(DiscoveredList&    refs_list,
 260                 BoolObjectClosure* is_alive,
 261                 OopClosure*        keep_alive);
 262   void pp2_work_concurrent_discovery(
 263                 DiscoveredList&    refs_list,
 264                 BoolObjectClosure* is_alive,
 265                 OopClosure*        keep_alive,
 266                 VoidClosure*       complete_gc);
 267   // Phase3: process the referents by either clearing them
 268   // or keeping them alive (and their transitive closure), and enqueuing them.
 269   void process_phase3(DiscoveredList&    refs_list,
 270                       bool               clear_referent,
 271                       BoolObjectClosure* is_alive,
 272                       OopClosure*        keep_alive,
 273                       VoidClosure*       complete_gc);
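
To make the phase ordering concrete, a hypothetical driver (illustration only, not a member this patch adds); it uses only the declarations visible above plus discovery_is_atomic() from further down in this file:

    // Hypothetical member for illustration: phases run per discovered list, in order.
    void ReferenceProcessor::example_process_list(DiscoveredList&    refs,
                                                  bool               clear_referent,
                                                  BoolObjectClosure* is_alive,
                                                  OopClosure*        keep_alive,
                                                  VoidClosure*       complete_gc) {
      // Phase2: remove refs whose referents are still strongly reachable.
      if (discovery_is_atomic()) {
        pp2_work(refs, is_alive, keep_alive);
      } else {
        // Concurrent discovery may observe in-flux referents; needs complete_gc.
        pp2_work_concurrent_discovery(refs, is_alive, keep_alive, complete_gc);
      }
      // Phase3: clear or keep the remaining referents, then enqueue the refs.
      process_phase3(refs, clear_referent, is_alive, keep_alive, complete_gc);
    }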
 274 
 275   // Enqueue references with a certain reachability level
 276   void enqueue_discovered_reflist(DiscoveredList& refs_list);
 277 
 278   // "Preclean" all the discovered reference lists
 279   // by removing references with strongly reachable referents.
 280   // The first argument is a predicate on an oop that indicates
 281   // its (strong) reachability and the second is a closure that
 282   // may be used to incrementalize or abort the precleaning process.
 283   // The caller is responsible for taking care of potential
 284   // interference with concurrent operations on these lists
 285   // (or predicates involved) by other threads. Currently
 286   // only used by the CMS collector.
 287   void preclean_discovered_references(BoolObjectClosure* is_alive,
 288                                       OopClosure*        keep_alive,
 289                                       VoidClosure*       complete_gc,
 290                                       YieldClosure*      yield,
 291                                       GCTimer*           gc_timer);
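
As a usage sketch (hypothetical, not from this webrev): precleaning is driven with a YieldClosure whose should_return() lets the caller abort; the trivial closure below never yields, whereas a concurrent collector such as CMS would poll its own termination state:

    class NeverYieldClosure : public YieldClosure {
    public:
      virtual bool should_return() { return false; }  // never abort precleaning
    };

    void example_preclean(ReferenceProcessor* rp,
                          BoolObjectClosure* is_alive,
                          OopClosure*        keep_alive,
                          VoidClosure*       complete_gc,
                          GCTimer*           gc_timer) {
      NeverYieldClosure yield;
      rp->preclean_discovered_references(is_alive, keep_alive, complete_gc,
                                         &yield, gc_timer);
    }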
 292 
 293   // Returns the name of the discovered reference list
 294   // occupying slot i (the list for reference type i / _num_queues).
 295   const char* list_name(uint i);
 296 
 297   void enqueue_discovered_reflists(AbstractRefProcTaskExecutor* task_executor,
 298                                    ReferenceProcessorPhaseTimes* phase_times);
 299 
 300   // "Preclean" the given discovered reference list
 301   // by removing references with strongly reachable referents.
 302   // Currently used in support of CMS only.
 303   void preclean_discovered_reflist(DiscoveredList&    refs_list,
 304                                    BoolObjectClosure* is_alive,
 305                                    OopClosure*        keep_alive,
 306                                    VoidClosure*       complete_gc,
 307                                    YieldClosure*      yield);
 308 private:
 309   // round-robin mod _num_queues (note: _not_ mod _max_num_queues)
 310   uint next_id() {
 311     uint id = _next_id;
 312     assert(!_discovery_is_mt, "Round robin should only be used in serial discovery");
 313     if (++_next_id == _num_queues) {
 314       _next_id = 0;
 315     }
 316     assert(_next_id < _num_queues, "_next_id %u _num_queues %u _max_num_queues %u", _next_id, _num_queues, _max_num_queues);
 317     return id;
 318   }
 319   DiscoveredList* get_discovered_list(ReferenceType rt);


 369 
 370   // whether discovery is atomic wrt other collectors
 371   bool discovery_is_atomic() const { return _discovery_is_atomic; }
 372   void set_atomic_discovery(bool atomic) { _discovery_is_atomic = atomic; }
 373 
 374   // whether discovery is done by multiple threads simultaneously
 375   bool discovery_is_mt() const { return _discovery_is_mt; }
 376   void set_mt_discovery(bool mt) { _discovery_is_mt = mt; }
 377 
 378   // Whether we are in a phase when _processing_ is MT.
 379   bool processing_is_mt() const { return _processing_is_mt; }
 380   void set_mt_processing(bool mt) { _processing_is_mt = mt; }
 381 
 382   // whether all enqueueing of weak references is complete
 383   bool enqueuing_is_done()  { return _enqueuing_is_done; }
 384   void set_enqueuing_is_done(bool v) { _enqueuing_is_done = v; }
 385 
 386   // iterate over oops
 387   void weak_oops_do(OopClosure* f);       // weak roots
 388 
 389   // Balance each of the discovered lists.
 390   void balance_all_queues();
 391   void verify_list(DiscoveredList& ref_list);
 392 
 393   // Discover a Reference object, using appropriate discovery criteria
 394   bool discover_reference(oop obj, ReferenceType rt);
 395 
 396   // Whether there are discovered references that need handling.
 397   bool has_discovered_references();
 398 
 399   // Process references found during GC (called by the garbage collector)
 400   ReferenceProcessorStats
 401   process_discovered_references(BoolObjectClosure*            is_alive,
 402                                 OopClosure*                   keep_alive,
 403                                 VoidClosure*                  complete_gc,
 404                                 AbstractRefProcTaskExecutor*  task_executor,
 405                                 ReferenceProcessorPhaseTimes* phase_times);
 406 
 407   // Enqueue references at end of GC (called by the garbage collector)
 408   void enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor,
 409                                      ReferenceProcessorPhaseTimes* phase_times);
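
A caller-side sketch (hypothetical; serial case): in this pre-patch version, enqueuing is still a separate step after processing, and passing NULL as the task executor is assumed here to select single-threaded execution:

    void example_gc_epilogue(ReferenceProcessor* rp,
                             BoolObjectClosure*  is_alive,
                             OopClosure*         keep_alive,
                             VoidClosure*        complete_gc,
                             ReferenceProcessorPhaseTimes* phase_times) {
      ReferenceProcessorStats stats =
        rp->process_discovered_references(is_alive, keep_alive, complete_gc,
                                          NULL /* task_executor */, phase_times);
      rp->enqueue_discovered_references(NULL /* task_executor */, phase_times);
      // stats would feed the GC tracer/logging (not shown here).
    }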
 410 
 411   // If a discovery is in progress that is being superseded, abandon it: all
 412   // the discovered lists will be empty, and all the objects on them will
 413   // have NULL discovered fields.  Must be called only at a safepoint.
 414   void abandon_partial_discovery();
 415 
 416   size_t total_reference_count(ReferenceType rt) const;
 417 
 418   // debugging
 419   void verify_no_references_recorded() PRODUCT_RETURN;
 420   void verify_referent(oop obj)        PRODUCT_RETURN;
 421 };
 422 
 423 // A reference processor that uses a single memory span to determine the area that
 424 // is subject to discovery. Useful for collectors which have contiguous generations.
 425 class SpanReferenceProcessor : public ReferenceProcessor {
 426   class SpanBasedDiscoverer : public BoolObjectClosure {
 427   public:
 428     MemRegion _span;
 429 
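
The hunk ends before the discoverer's body; a plausible completion (an assumption, since the rest of the class is not shown here) is a simple containment test against the span:

    // Assumed shape of the predicate: an object is subject to discovery
    // iff it lies within the configured span.
    SpanBasedDiscoverer(MemRegion span) : _span(span) { }
    virtual bool do_object_b(oop obj) { return _span.contains(obj); }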




 125 
 126   // Move to the next discovered reference.
 127   inline void next() {
 128     _prev_discovered_addr = _discovered_addr;
 129     _prev_discovered = _current_discovered;
 130     move_to_next();
 131   }
 132 
 133   // Remove the current reference from the list.
 134   void remove();
 135 
 136   // Make the referent alive.
 137   inline void make_referent_alive() {
 138     if (UseCompressedOops) {
 139       _keep_alive->do_oop((narrowOop*)_referent_addr);
 140     } else {
 141       _keep_alive->do_oop((oop*)_referent_addr);
 142     }
 143   }
 144 
 145   // Do enqueuing work, i.e. notifying the GC about the changed discovered pointers.
 146   void enqueue();
 147 
 148   // Move enqueued references to the reference pending list.
 149   void complete_enqueue();
 150 
 151   // NULL out referent pointer.
 152   void clear_referent();
 153 
 154   // Statistics
 155   NOT_PRODUCT(
 156   inline size_t processed() const { return _processed; }
 157   inline size_t removed() const   { return _removed; }
 158   )
 159 
 160   inline void move_to_next() {
 161     if (_current_discovered == _next_discovered) {
 162       // End of the list.
 163       _current_discovered = NULL;
 164     } else {
 165       _current_discovered = _next_discovered;
 166     }
 167     assert(_current_discovered != _first_seen, "cyclic ref_list found");
 168     NOT_PRODUCT(_processed++);
 169   }
 170 };
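
The two new operations split enqueuing across the iteration; a sketch of the intended call pattern in phase 3 (the loop shape and has_next()/load_ptrs() are assumptions, only enqueue()/complete_enqueue() come from this patch):

    // Sketch: clear or keep each referent, link it via enqueue(), and
    // publish the whole batch once with complete_enqueue().
    void example_phase3(DiscoveredListIterator& iter, bool clear_referent) {
      while (iter.has_next()) {                       // assumed helper
        iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
        if (clear_referent) {
          iter.clear_referent();          // NULL out the referent
        } else {
          iter.make_referent_alive();     // keep referent and its closure
        }
        iter.enqueue();                   // notify GC of the changed
                                          // discovered pointer
        iter.next();
      }
      iter.complete_enqueue();            // move batch to the pending list
    }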


 261                       BoolObjectClosure* is_alive,
 262                       OopClosure*        keep_alive,
 263                       VoidClosure*       complete_gc);
 264   // Work methods in support of process_phase2
 265   void pp2_work(DiscoveredList&    refs_list,
 266                 BoolObjectClosure* is_alive,
 267                 OopClosure*        keep_alive);
 268   void pp2_work_concurrent_discovery(
 269                 DiscoveredList&    refs_list,
 270                 BoolObjectClosure* is_alive,
 271                 OopClosure*        keep_alive,
 272                 VoidClosure*       complete_gc);
 273   // Phase3: process the referents by either clearing them
 274   // or keeping them alive (and their transitive closure), and enqueuing them.
 275   void process_phase3(DiscoveredList&    refs_list,
 276                       bool               clear_referent,
 277                       BoolObjectClosure* is_alive,
 278                       OopClosure*        keep_alive,
 279                       VoidClosure*       complete_gc);
 280 



 281   // "Preclean" all the discovered reference lists
 282   // by removing references with strongly reachable referents.
 283   // The is_alive closure is a predicate on an oop that indicates
 284   // its (strong) reachability, and the yield closure may be used
 285   // to incrementalize or abort the precleaning process.
 286   // The caller is responsible for taking care of potential
 287   // interference with concurrent operations on these lists
 288   // (or predicates involved) by other threads. Currently
 289   // only used by the CMS collector.
 290   void preclean_discovered_references(BoolObjectClosure* is_alive,
 291                                       OopClosure*        keep_alive,
 292                                       VoidClosure*       complete_gc,
 293                                       YieldClosure*      yield,
 294                                       GCTimer*           gc_timer);
 295 
 296   // Returns the name of the discovered reference list
 297   // occupying slot i (the list for reference type i / _num_queues).
 298   const char* list_name(uint i);
 299 



 300   // "Preclean" the given discovered reference list
 301   // by removing references with strongly reachable referents.
 302   // Currently used in support of CMS only.
 303   void preclean_discovered_reflist(DiscoveredList&    refs_list,
 304                                    BoolObjectClosure* is_alive,
 305                                    OopClosure*        keep_alive,
 306                                    VoidClosure*       complete_gc,
 307                                    YieldClosure*      yield);
 308 private:
 309   // round-robin mod _num_queues (note: _not_ mod _max_num_queues)
 310   uint next_id() {
 311     uint id = _next_id;
 312     assert(!_discovery_is_mt, "Round robin should only be used in serial discovery");
 313     if (++_next_id == _num_queues) {
 314       _next_id = 0;
 315     }
 316     assert(_next_id < _num_queues, "_next_id %u _num_queues %u _max_num_queues %u", _next_id, _num_queues, _max_num_queues);
 317     return id;
 318   }
 319   DiscoveredList* get_discovered_list(ReferenceType rt);


 369 
 370   // whether discovery is atomic wrt other collectors
 371   bool discovery_is_atomic() const { return _discovery_is_atomic; }
 372   void set_atomic_discovery(bool atomic) { _discovery_is_atomic = atomic; }
 373 
 374   // whether discovery is done by multiple threads simultaneously
 375   bool discovery_is_mt() const { return _discovery_is_mt; }
 376   void set_mt_discovery(bool mt) { _discovery_is_mt = mt; }
 377 
 378   // Whether we are in a phase when _processing_ is MT.
 379   bool processing_is_mt() const { return _processing_is_mt; }
 380   void set_mt_processing(bool mt) { _processing_is_mt = mt; }
 381 
 382   // whether all enqueueing of weak references is complete
 383   bool enqueuing_is_done()  { return _enqueuing_is_done; }
 384   void set_enqueuing_is_done(bool v) { _enqueuing_is_done = v; }
 385 
 386   // iterate over oops
 387   void weak_oops_do(OopClosure* f);       // weak roots
 388 


 389   void verify_list(DiscoveredList& ref_list);
 390 
 391   // Discover a Reference object, using appropriate discovery criteria
 392   bool discover_reference(oop obj, ReferenceType rt);
 393 
 394   // Whether there are discovered references that need handling.
 395   bool has_discovered_references();
 396 
 397   // Process references found during GC (called by the garbage collector)
 398   ReferenceProcessorStats
 399   process_discovered_references(BoolObjectClosure*            is_alive,
 400                                 OopClosure*                   keep_alive,
 401                                 VoidClosure*                  complete_gc,
 402                                 AbstractRefProcTaskExecutor*  task_executor,
 403                                 ReferenceProcessorPhaseTimes* phase_times);




 404 
 405   // If a discovery is in progress that is being superseded, abandon it: all
 406   // the discovered lists will be empty, and all the objects on them will
 407   // have NULL discovered fields.  Must be called only at a safepoint.
 408   void abandon_partial_discovery();
 409 
 410   size_t total_reference_count(ReferenceType rt) const;
 411 
 412   // debugging
 413   void verify_no_references_recorded() PRODUCT_RETURN;
 414   void verify_referent(oop obj)        PRODUCT_RETURN;
 415 };
 416 
 417 // A reference processor that uses a single memory span to determine the area that
 418 // is subject to discovery. Useful for collectors which have contiguous generations.
 419 class SpanReferenceProcessor : public ReferenceProcessor {
 420   class SpanBasedDiscoverer : public BoolObjectClosure {
 421   public:
 422     MemRegion _span;
 423 

