/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// ReferenceProcessor class encapsulates the per-"collector" processing
// of java.lang.Reference objects for GC. The interface is useful for supporting
// a generational abstraction, in particular when there are multiple
// generations that are being independently collected -- possibly
// concurrently and/or incrementally.  Note, however, that the
// ReferenceProcessor class abstracts away from a generational setting
// by using only a heap interval (called "span" below), thus allowing
// its use in a straightforward manner in a general, non-generational
// setting.
//
// The basic idea is that each ReferenceProcessor object concerns
// itself with ("weak") reference processing in a specific "span"
// of the heap of interest to a specific collector. Currently,
// the span is a convex interval of the heap, but, efficiency
// apart, there seems to be no reason it couldn't be extended
// (with appropriate modifications) to any "non-convex interval".

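// A purely illustrative sketch (the closure names here are hypothetical)
// of the rough order in which a stop-the-world collector drives this
// interface during a collection:
//
//   rp->enable_discovery();
//   // ... trace the heap; the collector calls rp->discover_reference(obj, rt)
//   // for each Reference object encountered during the trace ...
//   rp->setup_policy(clear_all_soft_refs);
//   rp->process_discovered_references(&is_alive, &keep_alive,
//                                     &complete_gc, NULL /* serial */);
//   rp->enqueue_discovered_references();
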
// forward references
class ReferencePolicy;
class AbstractRefProcTaskExecutor;
class DiscoveredList;

class ReferenceProcessor : public CHeapObj {
 protected:
  // End of list marker
  static oop  _sentinelRef;
  MemRegion   _span; // (right-open) interval of heap
                     // subject to weak reference discovery
  bool        _discovering_refs;      // true when discovery enabled
  bool        _discovery_is_atomic;   // if discovery is atomic wrt
                                      // other collectors in configuration
  bool        _discovery_is_mt;       // true if reference discovery is MT.
  // If true, setting "next" field of a discovered refs list requires
  // write barrier(s).  (Must be true if used in a collector in which
  // elements of a discovered list may be moved during discovery: for
  // example, a collector like Garbage-First that moves objects during a
  // long-term concurrent marking phase that does weak reference
  // discovery.)
  bool        _discovered_list_needs_barrier;
  BarrierSet* _bs;                    // Cached copy of BarrierSet.
  bool        _enqueuing_is_done;     // true if all weak references enqueued
  bool        _processing_is_mt;      // true during phases when
                                      // reference processing is MT.
  int         _next_id;               // round-robin counter in
                                      // support of work distribution

  // For collectors that do not keep GC marking information
  // in the object header, this field holds a closure that
  // helps the reference processor determine the reachability
  // of an oop (the field is currently initialized to NULL for
  // all collectors but the CMS collector).
  BoolObjectClosure* _is_alive_non_header;

  // Soft ref clearing policies
  // . the default policy
  static ReferencePolicy*   _default_soft_ref_policy;
  // . the "clear all" policy
  static ReferencePolicy*   _always_clear_soft_ref_policy;
  // . the current policy below is either one of the above
  ReferencePolicy*          _current_soft_ref_policy;

  // The discovered ref lists themselves

  // The active MT'ness degree of the queues below
  int             _num_q;
  // The maximum MT'ness degree of the queues below
  int             _max_num_q;
  // Arrays of lists of oops, one per thread
  DiscoveredList* _discoveredSoftRefs;
  DiscoveredList* _discoveredWeakRefs;
  DiscoveredList* _discoveredFinalRefs;
  DiscoveredList* _discoveredPhantomRefs;

 public:
  int num_q()                            { return _num_q; }
  void set_mt_degree(int v)              { _num_q = v; }
  DiscoveredList* discovered_soft_refs() { return _discoveredSoftRefs; }
  static oop  sentinel_ref()             { return _sentinelRef; }
  static oop* adr_sentinel_ref()         { return &_sentinelRef; }
  ReferencePolicy* setup_policy(bool always_clear) {
    _current_soft_ref_policy = always_clear ?
      _always_clear_soft_ref_policy : _default_soft_ref_policy;
    _current_soft_ref_policy->setup();   // snapshot the policy threshold
    return _current_soft_ref_policy;
  }
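  // Illustrative use (not prescriptive): a collector calls setup_policy()
  // once per collection, before processing references, passing true only
  // when all soft references should be cleared -- e.g. on a last-ditch
  // collection before throwing OutOfMemoryError:
  //
  //   ReferencePolicy* policy = rp->setup_policy(clear_all_soft_refs);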

 public:
  // Process references with a certain reachability level.
  void process_discovered_reflist(DiscoveredList               refs_lists[],
                                  ReferencePolicy*             policy,
                                  bool                         clear_referent,
                                  BoolObjectClosure*           is_alive,
                                  OopClosure*                  keep_alive,
                                  VoidClosure*                 complete_gc,
                                  AbstractRefProcTaskExecutor* task_executor);

  void process_phaseJNI(BoolObjectClosure* is_alive,
                        OopClosure*        keep_alive,
                        VoidClosure*       complete_gc);

  // Work methods used by process_discovered_reflist
  // Phase1: keep alive all those referents that are otherwise
  // dead but which must be kept alive by policy (and their transitive closure).
  void process_phase1(DiscoveredList&     refs_list,
                      ReferencePolicy*    policy,
                      BoolObjectClosure*  is_alive,
                      OopClosure*         keep_alive,
                      VoidClosure*        complete_gc);
  // Phase2: remove all those references whose referents are
  // reachable.
  inline void process_phase2(DiscoveredList&    refs_list,
                             BoolObjectClosure* is_alive,
                             OopClosure*        keep_alive,
                             VoidClosure*       complete_gc) {
    if (discovery_is_atomic()) {
      // complete_gc is ignored in this case for this phase
      pp2_work(refs_list, is_alive, keep_alive);
    } else {
      assert(complete_gc != NULL, "Error");
      pp2_work_concurrent_discovery(refs_list, is_alive,
                                    keep_alive, complete_gc);
    }
  }
  // Work methods in support of process_phase2
  void pp2_work(DiscoveredList&    refs_list,
                BoolObjectClosure* is_alive,
                OopClosure*        keep_alive);
  void pp2_work_concurrent_discovery(
                DiscoveredList&    refs_list,
                BoolObjectClosure* is_alive,
                OopClosure*        keep_alive,
                VoidClosure*       complete_gc);
  // Phase3: process the referents by either clearing them
  // or keeping them alive (and their transitive closure)
  void process_phase3(DiscoveredList&    refs_list,
                      bool               clear_referent,
                      BoolObjectClosure* is_alive,
                      OopClosure*        keep_alive,
                      VoidClosure*       complete_gc);
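  // Taken together, these phases form the pipeline that
  // process_discovered_reflist applies to the discovered lists.
  // Informally, for a single list in the serial case:
  //
  //   process_phase1(list, policy, is_alive, keep_alive, complete_gc);
  //                                  // only when policy != NULL (soft refs)
  //   process_phase2(list, is_alive, keep_alive, complete_gc);
  //   process_phase3(list, clear_referent, is_alive, keep_alive, complete_gc);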

  // Enqueue references with a certain reachability level
  void enqueue_discovered_reflist(DiscoveredList& refs_list, HeapWord* pending_list_addr);

  // "Preclean" all the discovered reference lists
  // by removing references with strongly reachable referents.
  // The is_alive closure is a predicate on an oop that indicates
  // its (strong) reachability, and the yield closure may be used to
  // incrementalize or abort the precleaning process.
  // The caller is responsible for taking care of potential
  // interference with concurrent operations on these lists
  // (or predicates involved) by other threads. Currently
  // only used by the CMS collector.  should_unload_classes is
  // used to aid assertion checking when classes are collected.
  void preclean_discovered_references(BoolObjectClosure* is_alive,
                                      OopClosure*        keep_alive,
                                      VoidClosure*       complete_gc,
                                      YieldClosure*      yield,
                                      bool               should_unload_classes);

  // Delete entries in the discovered lists that either have a
  // NULL referent or are not active. Such Reference objects
  // can result from the clearing or enqueueing of Reference
  // objects concurrently with their discovery by a (concurrent)
  // collector. For a definition of "active" see java.lang.ref.Reference;
  // Refs are born active, become inactive when enqueued,
  // and never become active again. The state of being
  // active is encoded as follows: A Ref is active
  // if and only if its "next" field is NULL.
  void clean_up_discovered_references();
  void clean_up_discovered_reflist(DiscoveredList& refs_list);
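  // A concrete reading of the encoding described above (illustrative only;
  // the accessor shown is the java_lang_ref_Reference helper used elsewhere
  // in the VM):
  //
  //   bool ref_is_active = (java_lang_ref_Reference::next(ref_oop) == NULL);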

  // Returns the name of the discovered reference list
  // occupying slot i; the list's reference type is given by i / _num_q.
  const char* list_name(int i);

  void enqueue_discovered_reflists(HeapWord* pending_list_addr, AbstractRefProcTaskExecutor* task_executor);

 protected:
  // "Preclean" the given discovered reference list
  // by removing references with strongly reachable referents.
  // Currently used in support of CMS only.
  void preclean_discovered_reflist(DiscoveredList&    refs_list,
                                   BoolObjectClosure* is_alive,
                                   OopClosure*        keep_alive,
                                   VoidClosure*       complete_gc,
                                   YieldClosure*      yield);

  int next_id() {
    int id = _next_id;
    if (++_next_id == _num_q) {
      _next_id = 0;
    }
    return id;
  }
  DiscoveredList* get_discovered_list(ReferenceType rt);
  inline void add_to_discovered_list_mt(DiscoveredList& refs_list, oop obj,
                                        HeapWord* discovered_addr);
  void verify_ok_to_handle_reflists() PRODUCT_RETURN;

  void abandon_partial_discovered_list(DiscoveredList& refs_list);

  // Returns the number of weak JNI references.
  unsigned int count_jni_refs();

  // Balances reference queues.
  void balance_queues(DiscoveredList ref_lists[]);

  // Update (advance) the soft ref master clock field.
  void update_soft_ref_master_clock();

 public:
  // constructor
  ReferenceProcessor():
    _span((HeapWord*)NULL, (HeapWord*)NULL),
    _discoveredSoftRefs(NULL),  _discoveredWeakRefs(NULL),
    _discoveredFinalRefs(NULL), _discoveredPhantomRefs(NULL),
    _discovering_refs(false),
    _discovery_is_atomic(true),
    _enqueuing_is_done(false),
    _discovery_is_mt(false),
    _discovered_list_needs_barrier(false),
    _bs(NULL),
    _is_alive_non_header(NULL),
    _num_q(0),
    _max_num_q(0),
    _processing_is_mt(false),
    _next_id(0)
  {}

  ReferenceProcessor(MemRegion span, bool atomic_discovery,
                     bool mt_discovery,
                     int mt_degree = 1,
                     bool mt_processing = false,
                     bool discovered_list_needs_barrier = false);

  // Allocates and initializes a reference processor.
  static ReferenceProcessor* create_ref_processor(
    MemRegion          span,
    bool               atomic_discovery,
    bool               mt_discovery,
    BoolObjectClosure* is_alive_non_header = NULL,
    int                parallel_gc_threads = 1,
    bool               mt_processing = false,
    bool               discovered_list_needs_barrier = false);
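
  // For illustration only (the actual arguments vary by collector and the
  // names below are placeholders): a generational collector might set up its
  // reference processor roughly as
  //
  //   _ref_processor =
  //     ReferenceProcessor::create_ref_processor(gen_span,
  //                                              true,   // atomic discovery
  //                                              false,  // single-threaded discovery
  //                                              NULL,   // is_alive_non_header
  //                                              ParallelGCThreads,
  //                                              ParallelRefProcEnabled);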

  // RefDiscoveryPolicy values
  enum DiscoveryPolicy {
    ReferenceBasedDiscovery = 0,
    ReferentBasedDiscovery  = 1,
    DiscoveryPolicyMin      = ReferenceBasedDiscovery,
    DiscoveryPolicyMax      = ReferentBasedDiscovery
  };

  static void init_statics();

 public:
  // get and set "is_alive_non_header" field
  BoolObjectClosure* is_alive_non_header() {
    return _is_alive_non_header;
  }
  void set_is_alive_non_header(BoolObjectClosure* is_alive_non_header) {
    _is_alive_non_header = is_alive_non_header;
  }

  // get and set span
  MemRegion span()                   { return _span; }
  void      set_span(MemRegion span) { _span = span; }

  // start and stop weak ref discovery
  void enable_discovery()   { _discovering_refs = true;  }
  void disable_discovery()  { _discovering_refs = false; }
  bool discovery_enabled()  { return _discovering_refs;  }

  // whether discovery is atomic wrt other collectors
  bool discovery_is_atomic() const { return _discovery_is_atomic; }
  void set_atomic_discovery(bool atomic) { _discovery_is_atomic = atomic; }

  // whether discovery is done by multiple threads simultaneously
  bool discovery_is_mt() const { return _discovery_is_mt; }
  void set_mt_discovery(bool mt) { _discovery_is_mt = mt; }

  // Whether we are in a phase when _processing_ is MT.
  bool processing_is_mt() const { return _processing_is_mt; }
  void set_mt_processing(bool mt) { _processing_is_mt = mt; }

  // whether all enqueuing of weak references is complete
  bool enqueuing_is_done()  { return _enqueuing_is_done; }
  void set_enqueuing_is_done(bool v) { _enqueuing_is_done = v; }

  // iterate over oops
  void weak_oops_do(OopClosure* f);       // weak roots
  static void oops_do(OopClosure* f);     // strong root(s)

  // Balance each of the discovered lists.
  void balance_all_queues();

  // Discover a Reference object, using appropriate discovery criteria
  bool discover_reference(oop obj, ReferenceType rt);

  // Process references found during GC (called by the garbage collector)
  void process_discovered_references(BoolObjectClosure*           is_alive,
                                     OopClosure*                  keep_alive,
                                     VoidClosure*                 complete_gc,
                                     AbstractRefProcTaskExecutor* task_executor);

 public:
  // Enqueue references at end of GC (called by the garbage collector)
  bool enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor = NULL);

  // If a discovery phase is in progress but is being superseded, abandon it:
  // all the discovered lists will be empty, and all the objects on them will
  // have NULL discovered fields.  Must be called only at a safepoint.
  void abandon_partial_discovery();

  // debugging
  void verify_no_references_recorded() PRODUCT_RETURN;
  static void verify();

  // clear the discovered lists (unlinking each entry).
  void clear_discovered_references() PRODUCT_RETURN;
};

// A utility class to disable reference discovery for the given
// ReferenceProcessor in the scope that contains it.
class NoRefDiscovery: StackObj {
 private:
  ReferenceProcessor* _rp;
  bool _was_discovering_refs;
 public:
  NoRefDiscovery(ReferenceProcessor* rp) : _rp(rp) {
    _was_discovering_refs = _rp->discovery_enabled();
    if (_was_discovering_refs) {
      _rp->disable_discovery();
    }
  }

  ~NoRefDiscovery() {
    if (_was_discovering_refs) {
      _rp->enable_discovery();
    }
  }
};
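
// Typical (illustrative) use of the scoped helper above: discovery is
// suppressed for exactly the dynamic extent of the block, and the previous
// state is restored on exit:
//
//   {
//     NoRefDiscovery no_discovery(ref_processor);  // some ReferenceProcessor*
//     // ... work during which Reference objects must not be discovered ...
//   }
//
// The ReferenceProcessor*Mutator classes below follow the same
// save-and-restore pattern for their respective settings.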


// A utility class to temporarily mutate the span of the
// given ReferenceProcessor in the scope that contains it.
class ReferenceProcessorSpanMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  MemRegion           _saved_span;

 public:
  ReferenceProcessorSpanMutator(ReferenceProcessor* rp,
                                MemRegion span):
    _rp(rp) {
    _saved_span = _rp->span();
    _rp->set_span(span);
  }

  ~ReferenceProcessorSpanMutator() {
    _rp->set_span(_saved_span);
  }
};

// A utility class to temporarily change the MT'ness of
// reference discovery for the given ReferenceProcessor
// in the scope that contains it.
class ReferenceProcessorMTMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  bool                _saved_mt;

 public:
  ReferenceProcessorMTMutator(ReferenceProcessor* rp,
                              bool mt):
    _rp(rp) {
    _saved_mt = _rp->discovery_is_mt();
    _rp->set_mt_discovery(mt);
  }

  ~ReferenceProcessorMTMutator() {
    _rp->set_mt_discovery(_saved_mt);
  }
};


// A utility class to temporarily change the disposition
// of the "is_alive_non_header" closure field of the
// given ReferenceProcessor in the scope that contains it.
class ReferenceProcessorIsAliveMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  BoolObjectClosure*  _saved_cl;

 public:
  ReferenceProcessorIsAliveMutator(ReferenceProcessor* rp,
                                   BoolObjectClosure*  cl):
    _rp(rp) {
    _saved_cl = _rp->is_alive_non_header();
    _rp->set_is_alive_non_header(cl);
  }

  ~ReferenceProcessorIsAliveMutator() {
    _rp->set_is_alive_non_header(_saved_cl);
  }
};

// A utility class to temporarily change the disposition
// of the "discovery_is_atomic" field of the
// given ReferenceProcessor in the scope that contains it.
class ReferenceProcessorAtomicMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  bool                _saved_atomic_discovery;

 public:
  ReferenceProcessorAtomicMutator(ReferenceProcessor* rp,
                                  bool atomic):
    _rp(rp) {
    _saved_atomic_discovery = _rp->discovery_is_atomic();
    _rp->set_atomic_discovery(atomic);
  }

  ~ReferenceProcessorAtomicMutator() {
    _rp->set_atomic_discovery(_saved_atomic_discovery);
  }
};


// A utility class to temporarily change the MT processing
// disposition of the given ReferenceProcessor instance
// in the scope that contains it.
class ReferenceProcessorMTProcMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  bool  _saved_mt;

 public:
  ReferenceProcessorMTProcMutator(ReferenceProcessor* rp,
                                  bool mt):
    _rp(rp) {
    _saved_mt = _rp->processing_is_mt();
    _rp->set_mt_processing(mt);
  }

  ~ReferenceProcessorMTProcMutator() {
    _rp->set_mt_processing(_saved_mt);
  }
};


// This class is an interface used to implement task execution for
// reference processing.
class AbstractRefProcTaskExecutor {
public:

  // Abstract tasks to execute.
  class ProcessTask;
  class EnqueueTask;

  // Executes a task using worker threads.
  virtual void execute(ProcessTask& task) = 0;
  virtual void execute(EnqueueTask& task) = 0;

  // Switch to single threaded mode.
  virtual void set_single_threaded_mode() { };
};

// Abstract reference processing task to execute.
class AbstractRefProcTaskExecutor::ProcessTask {
protected:
  ProcessTask(ReferenceProcessor& ref_processor,
              DiscoveredList      refs_lists[],
              bool                marks_oops_alive)
    : _ref_processor(ref_processor),
      _refs_lists(refs_lists),
      _marks_oops_alive(marks_oops_alive)
  { }

public:
  virtual void work(unsigned int work_id, BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc) = 0;

  // Returns true if a task marks some oops as alive.
  bool marks_oops_alive() const
  { return _marks_oops_alive; }

protected:
  ReferenceProcessor& _ref_processor;
  DiscoveredList*     _refs_lists;
  const bool          _marks_oops_alive;
};

// Abstract reference processing task to execute.
class AbstractRefProcTaskExecutor::EnqueueTask {
protected:
  EnqueueTask(ReferenceProcessor& ref_processor,
              DiscoveredList      refs_lists[],
              HeapWord*           pending_list_addr,
              oop                 sentinel_ref,
              int                 n_queues)
    : _ref_processor(ref_processor),
      _refs_lists(refs_lists),
      _pending_list_addr(pending_list_addr),
      _sentinel_ref(sentinel_ref),
      _n_queues(n_queues)
  { }

public:
  virtual void work(unsigned int work_id) = 0;

protected:
  ReferenceProcessor& _ref_processor;
  DiscoveredList*     _refs_lists;
  HeapWord*           _pending_list_addr;
  oop                 _sentinel_ref;
  int                 _n_queues;
};
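
// A minimal sketch, purely illustrative and not part of this interface, of a
// degenerate executor that runs each task on the calling thread with a single
// work id; real executors instead dispatch the work ids to a gang of GC
// worker threads. The is_alive/keep_alive/complete_gc closures would be
// supplied by the collector that owns the executor.
//
//   class SerialRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
//    public:
//     virtual void execute(ProcessTask& task) {
//       task.work(0, is_alive, keep_alive, complete_gc);
//     }
//     virtual void execute(EnqueueTask& task) {
//       task.work(0);
//     }
//   };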