< prev index next >

src/hotspot/share/gc/shared/referenceProcessor.hpp

Print this page
rev 49944 : imported patch 8201492-properly-implement-non-contiguous-reference-processing
rev 49945 : imported patch 8201492-stefanj-review
rev 49946 : imported patch 8201492-kim-review
rev 49949 : imported patch 8202021-cleanup-referenceprocessor
rev 49950 : imported patch 8202021-stefanj-review
rev 49951 : imported patch 8202017-reference-processor-remove-enqueue
rev 49953 : imported patch 8201491-precleaning
rev 49954 : [mq]: 8201491-kbarrett-review


 262                       BoolObjectClosure* is_alive,
 263                       OopClosure*        keep_alive,
 264                       VoidClosure*       complete_gc);
 265   // Work methods in support of process_phase2
 266   void pp2_work(DiscoveredList&    refs_list,
 267                 BoolObjectClosure* is_alive,
 268                 OopClosure*        keep_alive);
 269   void pp2_work_concurrent_discovery(
 270                 DiscoveredList&    refs_list,
 271                 BoolObjectClosure* is_alive,
 272                 OopClosure*        keep_alive,
 273                 VoidClosure*       complete_gc);
 274   // Phase3: process the referents by either clearing them
 275   // or keeping them alive (and their closure), and enqueuing them.
 276   void process_phase3(DiscoveredList&    refs_list,
 277                       bool               clear_referent,
 278                       BoolObjectClosure* is_alive,
 279                       OopClosure*        keep_alive,
 280                       VoidClosure*       complete_gc);
 281 
 282   // "Preclean" all the discovered reference lists
 283   // by removing references with strongly reachable referents.

 284   // The first argument is a predicate on an oop that indicates
 285   // its (strong) reachability and the fourth is a closure that
 286   // may be used to incrementalize or abort the precleaning process.
 287   // The caller is responsible for taking care of potential
 288   // interference with concurrent operations on these lists
 289   // (or predicates involved) by other threads. Currently
 290   // only used by the CMS collector.
 291   void preclean_discovered_references(BoolObjectClosure* is_alive,
 292                                       OopClosure*        keep_alive,
 293                                       VoidClosure*       complete_gc,
 294                                       YieldClosure*      yield,
 295                                       GCTimer*           gc_timer);
 296 
 297   // Returns the name of the discovered reference list
 298   // occupying the i / _num_queues slot.
 299   const char* list_name(uint i);
 300 
 301   // "Preclean" the given discovered reference list
 302   // by removing references with strongly reachable referents.
 303   // Currently used in support of CMS only.
 304   void preclean_discovered_reflist(DiscoveredList&    refs_list,


 305                                    BoolObjectClosure* is_alive,
 306                                    OopClosure*        keep_alive,
 307                                    VoidClosure*       complete_gc,
 308                                    YieldClosure*      yield);
 309 private:
 310   // round-robin mod _num_queues (note: _not_ mod _max_num_queues)
       // Hands out discovered-list indices for single-threaded discovery:
       // returns the current index and advances _next_id, wrapping back to
       // zero once _num_queues (the active queue count) is reached.
 311   uint next_id() {
 312     uint id = _next_id;
 313     assert(!_discovery_is_mt, "Round robin should only be used in serial discovery");
 314     if (++_next_id == _num_queues) {
 315       _next_id = 0;
 316     }
 317     assert(_next_id < _num_queues, "_next_id %u _num_queues %u _max_num_queues %u", _next_id, _num_queues, _max_num_queues);
 318     return id;
 319   }
 320   DiscoveredList* get_discovered_list(ReferenceType rt);
 321   inline void add_to_discovered_list_mt(DiscoveredList& refs_list, oop obj,
 322                                         HeapWord* discovered_addr);
 323 
 324   void clear_discovered_references(DiscoveredList& refs_list);
 325 
 326   void log_reflist_counts(DiscoveredList ref_lists[], uint active_length, size_t total_count) PRODUCT_RETURN;

 327 
 328   // Balances reference queues.
 329   void balance_queues(DiscoveredList ref_lists[]);
 330 
 331   // Update (advance) the soft ref master clock field.
 332   void update_soft_ref_master_clock();
 333 
 334   bool is_subject_to_discovery(oop const obj) const;
 335 
 336 public:
 337   // Default parameters give you a vanilla reference processor.
 338   ReferenceProcessor(BoolObjectClosure* is_subject_to_discovery,
 339                      bool mt_processing = false, uint mt_processing_degree = 1,
 340                      bool mt_discovery  = false, uint mt_discovery_degree  = 1,
 341                      bool atomic_discovery = true,
 342                      BoolObjectClosure* is_alive_non_header = NULL);
 343 
 344   // RefDiscoveryPolicy values
 345   enum DiscoveryPolicy {
 346     ReferenceBasedDiscovery = 0,




 262                       BoolObjectClosure* is_alive,
 263                       OopClosure*        keep_alive,
 264                       VoidClosure*       complete_gc);
 265   // Work methods in support of process_phase2
 266   void pp2_work(DiscoveredList&    refs_list,
 267                 BoolObjectClosure* is_alive,
 268                 OopClosure*        keep_alive);
 269   void pp2_work_concurrent_discovery(
 270                 DiscoveredList&    refs_list,
 271                 BoolObjectClosure* is_alive,
 272                 OopClosure*        keep_alive,
 273                 VoidClosure*       complete_gc);
 274   // Phase3: process the referents by either clearing them
 275   // or keeping them alive (and their closure), and enqueuing them.
 276   void process_phase3(DiscoveredList&    refs_list,
 277                       bool               clear_referent,
 278                       BoolObjectClosure* is_alive,
 279                       OopClosure*        keep_alive,
 280                       VoidClosure*       complete_gc);
 281 
 282   // "Preclean" all the discovered reference lists by removing references that
 283   // are active (e.g. due to the mutator calling enqueue()) or with NULL or
 284   // strongly reachable referents.
 285   // The first argument is a predicate on an oop that indicates
 286   // its (strong) reachability and the fourth is a closure that
 287   // may be used to incrementalize or abort the precleaning process.
 288   // The caller is responsible for taking care of potential
 289   // interference with concurrent operations on these lists
 290   // (or predicates involved) by other threads.

 291   void preclean_discovered_references(BoolObjectClosure* is_alive,
 292                                       OopClosure*        keep_alive,
 293                                       VoidClosure*       complete_gc,
 294                                       YieldClosure*      yield,
 295                                       GCTimer*           gc_timer);
 296 
 297   // Returns the name of the discovered reference list
 298   // occupying the i / _num_queues slot.
 299   const char* list_name(uint i);
 300 
 301 private:
 302   // "Preclean" the given discovered reference list by removing references with
 303   // the attributes mentioned in preclean_discovered_references().
 304   // Supports both normal and fine grain yielding.
 305   // Returns whether the operation should be aborted.
 306   bool preclean_discovered_reflist(DiscoveredList&    refs_list,
 307                                    BoolObjectClosure* is_alive,
 308                                    OopClosure*        keep_alive,
 309                                    VoidClosure*       complete_gc,
 310                                    YieldClosure*      yield);
 311 
 312   // round-robin mod _num_queues (note: _not_ mod _max_num_queues)
       // Hands out discovered-list indices for single-threaded discovery:
       // returns the current index and advances _next_id, wrapping back to
       // zero once _num_queues (the active queue count) is reached.
 313   uint next_id() {
 314     uint id = _next_id;
 315     assert(!_discovery_is_mt, "Round robin should only be used in serial discovery");
 316     if (++_next_id == _num_queues) {
 317       _next_id = 0;
 318     }
 319     assert(_next_id < _num_queues, "_next_id %u _num_queues %u _max_num_queues %u", _next_id, _num_queues, _max_num_queues);
 320     return id;
 321   }
 322   DiscoveredList* get_discovered_list(ReferenceType rt);
 323   inline void add_to_discovered_list_mt(DiscoveredList& refs_list, oop obj,
 324                                         HeapWord* discovered_addr);
 325 
 326   void clear_discovered_references(DiscoveredList& refs_list);
 327 
 328   void log_reflist(const char* prefix, DiscoveredList list[], uint num_active_queues);
 329   void log_reflist_counts(DiscoveredList ref_lists[], uint num_active_queues) PRODUCT_RETURN;
 330 
 331   // Balances reference queues.
 332   void balance_queues(DiscoveredList ref_lists[]);
 333 
 334   // Update (advance) the soft ref master clock field.
 335   void update_soft_ref_master_clock();
 336 
 337   bool is_subject_to_discovery(oop const obj) const;
 338 
 339 public:
 340   // Default parameters give you a vanilla reference processor.
 341   ReferenceProcessor(BoolObjectClosure* is_subject_to_discovery,
 342                      bool mt_processing = false, uint mt_processing_degree = 1,
 343                      bool mt_discovery  = false, uint mt_discovery_degree  = 1,
 344                      bool atomic_discovery = true,
 345                      BoolObjectClosure* is_alive_non_header = NULL);
 346 
 347   // RefDiscoveryPolicy values
 348   enum DiscoveryPolicy {
 349     ReferenceBasedDiscovery = 0,


< prev index next >