Print this page


Split Close
Expand all
Collapse all
          --- old/src/share/vm/memory/referenceProcessor.hpp
          +++ new/src/share/vm/memory/referenceProcessor.hpp
↓ open down ↓ 63 lines elided ↑ open up ↑
  64   64    // write barrier(s).  (Must be true if used in a collector in which
  65   65    // elements of a discovered list may be moved during discovery: for
  66   66    // example, a collector like Garbage-First that moves objects during a
  67   67    // long-term concurrent marking phase that does weak reference
  68   68    // discovery.)
  69   69    bool        _discovered_list_needs_barrier;
  70   70    BarrierSet* _bs;                    // Cached copy of BarrierSet.
  71   71    bool        _enqueuing_is_done;     // true if all weak references enqueued
  72   72    bool        _processing_is_mt;      // true during phases when
  73   73                                        // reference processing is MT.
  74      -  int         _next_id;               // round-robin counter in
       74 +  int         _next_id;               // round-robin mod _num_q counter in
  75   75                                        // support of work distribution
  76   76  
  77   77    // For collectors that do not keep GC marking information
  78   78    // in the object header, this field holds a closure that
  79   79    // helps the reference processor determine the reachability
  80   80    // of an oop (the field is currently initialized to NULL for
  81   81    // all collectors but the CMS collector).
  82   82    BoolObjectClosure* _is_alive_non_header;
  83   83  
  84   84    // Soft ref clearing policies
↓ open down ↓ 11 lines elided ↑ open up ↑
  96   96    // The maximum MT'ness degree of the queues below
  97   97    int             _max_num_q;
  98   98    // Arrays of lists of oops, one per thread
  99   99    DiscoveredList* _discoveredSoftRefs;
 100  100    DiscoveredList* _discoveredWeakRefs;
 101  101    DiscoveredList* _discoveredFinalRefs;
 102  102    DiscoveredList* _discoveredPhantomRefs;
 103  103  
 104  104   public:
 105  105    int num_q()                            { return _num_q; }
 106      -  void set_mt_degree(int v)              { _num_q = v; }
      106 +  int max_num_q()                        { return _max_num_q; }
      107 +  void set_active_mt_degree(int v)       { _num_q = v; }
 107  108    DiscoveredList* discovered_soft_refs() { return _discoveredSoftRefs; }
 108  109    static oop  sentinel_ref()             { return _sentinelRef; }
 109  110    static oop* adr_sentinel_ref()         { return &_sentinelRef; }
 110  111    ReferencePolicy* setup_policy(bool always_clear) {
 111  112      _current_soft_ref_policy = always_clear ?
 112  113        _always_clear_soft_ref_policy : _default_soft_ref_policy;
 113  114      _current_soft_ref_policy->setup();   // snapshot the policy threshold
 114  115      return _current_soft_ref_policy;
 115  116    }
 116  117  
↓ open down ↓ 92 lines elided ↑ open up ↑
 209  210   protected:
 210  211    // "Preclean" the given discovered reference list
 211  212    // by removing references with strongly reachable referents.
 212  213    // Currently used in support of CMS only.
 213  214    void preclean_discovered_reflist(DiscoveredList&    refs_list,
 214  215                                     BoolObjectClosure* is_alive,
 215  216                                     OopClosure*        keep_alive,
 216  217                                     VoidClosure*       complete_gc,
 217  218                                     YieldClosure*      yield);
 218  219  
       220 +  // round-robin mod _num_q (note: _not_ mod _max_num_q)
 219  221    int next_id() {
 220  222      int id = _next_id;
 221  223      if (++_next_id == _num_q) {
 222  224        _next_id = 0;
 223  225      }
 224  226      return id;
 225  227    }
 226  228    DiscoveredList* get_discovered_list(ReferenceType rt);
 227  229    inline void add_to_discovered_list_mt(DiscoveredList& refs_list, oop obj,
 228  230                                          HeapWord* discovered_addr);
↓ open down ↓ 20 lines elided ↑ open up ↑
 249  251      _discovery_is_atomic(true),
 250  252      _enqueuing_is_done(false),
 251  253      _discovery_is_mt(false),
 252  254      _discovered_list_needs_barrier(false),
 253  255      _bs(NULL),
 254  256      _is_alive_non_header(NULL),
 255  257      _num_q(0),
 256  258      _max_num_q(0),
 257  259      _processing_is_mt(false),
 258  260      _next_id(0)
 259      -  {}
      261 +  { }
 260  262  
 261      -  ReferenceProcessor(MemRegion span, bool atomic_discovery,
 262      -                     bool mt_discovery,
 263      -                     int mt_degree = 1,
 264      -                     bool mt_processing = false,
      263 +  // Default parameters give you a vanilla reference processor.
      264 +  ReferenceProcessor(MemRegion span,
      265 +                     bool mt_processing = false, int mt_processing_degree = 1,
      266 +                     bool mt_discovery  = false, int mt_discovery_degree  = 1,
      267 +                     bool atomic_discovery = true,
      268 +                     BoolObjectClosure* is_alive_non_header = NULL,
 265  269                       bool discovered_list_needs_barrier = false);
 266  270  
 267      -  // Allocates and initializes a reference processor.
 268      -  static ReferenceProcessor* create_ref_processor(
 269      -    MemRegion          span,
 270      -    bool               atomic_discovery,
 271      -    bool               mt_discovery,
 272      -    BoolObjectClosure* is_alive_non_header = NULL,
 273      -    int                parallel_gc_threads = 1,
 274      -    bool               mt_processing = false,
 275      -    bool               discovered_list_needs_barrier = false);
 276      -
 277  271    // RefDiscoveryPolicy values
 278  272    enum DiscoveryPolicy {
 279  273      ReferenceBasedDiscovery = 0,
 280  274      ReferentBasedDiscovery  = 1,
 281  275      DiscoveryPolicyMin      = ReferenceBasedDiscovery,
 282  276      DiscoveryPolicyMax      = ReferentBasedDiscovery
 283  277    };
 284  278  
 285  279    static void init_statics();
 286  280  
↓ open down ↓ 103 lines elided ↑ open up ↑
 390  384    }
 391  385  
 392  386    ~ReferenceProcessorSpanMutator() {
 393  387      _rp->set_span(_saved_span);
 394  388    }
 395  389  };
 396  390  
 397  391  // A utility class to temporarily change the MT'ness of
 398  392  // reference discovery for the given ReferenceProcessor
 399  393  // in the scope that contains it.
 400      -class ReferenceProcessorMTMutator: StackObj {
      394 +class ReferenceProcessorMTDiscoveryMutator: StackObj {
 401  395   private:
 402  396    ReferenceProcessor* _rp;
 403  397    bool                _saved_mt;
 404  398  
 405  399   public:
 406      -  ReferenceProcessorMTMutator(ReferenceProcessor* rp,
 407      -                              bool mt):
      400 +  ReferenceProcessorMTDiscoveryMutator(ReferenceProcessor* rp,
      401 +                                       bool mt):
 408  402      _rp(rp) {
 409  403      _saved_mt = _rp->discovery_is_mt();
 410  404      _rp->set_mt_discovery(mt);
 411  405    }
 412  406  
 413      -  ~ReferenceProcessorMTMutator() {
      407 +  ~ReferenceProcessorMTDiscoveryMutator() {
 414  408      _rp->set_mt_discovery(_saved_mt);
 415  409    }
 416  410  };
 417  411  
 418  412  
 419  413  // A utility class to temporarily change the disposition
 420  414  // of the "is_alive_non_header" closure field of the
 421  415  // given ReferenceProcessor in the scope that contains it.
 422  416  class ReferenceProcessorIsAliveMutator: StackObj {
 423  417   private:
↓ open down ↓ 130 lines elided ↑ open up ↑
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX