rev 2691 : [mq]: g1-reference-processing

          --- old/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
          +++ new/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
... 147 lines elided ...
 148  148  
 149  149  class MutatorAllocRegion : public G1AllocRegion {
 150  150  protected:
 151  151    virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
 152  152    virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
 153  153  public:
 154  154    MutatorAllocRegion()
 155  155      : G1AllocRegion("Mutator Alloc Region", false /* bot_updates */) { }
 156  156  };
 157  157  
      158 +// The G1 STW is alive closure.
      159 +// An instance is embedded into the G1CH and used as the
      160 +// (optional) _is_alive_non_header closure in the STW
       161 +// reference processor. It is also used extensively for
       162 +// reference processing during STW evacuation pauses.
      163 +class G1STWIsAliveClosure: public BoolObjectClosure {
      164 +  G1CollectedHeap* _g1;
      165 +public:
      166 +  G1STWIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
      167 +  void do_object(oop p) { assert(false, "Do not call."); }
      168 +  bool do_object_b(oop p);
      169 +};
      170 +
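The matching definition of do_object_b() lives in g1CollectedHeap.cpp and is outside this hunk. A minimal sketch of the intended predicate, assuming a G1CollectedHeap::obj_in_cs() helper is available to the closure, might be:

    // Sketch only: during a STW pause an object is considered live if it
    // lies outside the collection set, or is inside it but has already
    // been evacuated (i.e. a forwarding pointer has been installed).
    bool G1STWIsAliveClosure::do_object_b(oop p) {
      return !_g1->obj_in_cs(p) || p->is_forwarded();
    }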
 158  171  class SurvivorGCAllocRegion : public G1AllocRegion {
 159  172  protected:
 160  173    virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
 161  174    virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
 162  175  public:
 163  176    SurvivorGCAllocRegion()
 164  177    : G1AllocRegion("Survivor GC Alloc Region", false /* bot_updates */) { }
 165  178  };
 166  179  
 167  180  class OldGCAllocRegion : public G1AllocRegion {
 168  181  protected:
 169  182    virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
 170  183    virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
 171  184  public:
 172  185    OldGCAllocRegion()
 173  186    : G1AllocRegion("Old GC Alloc Region", true /* bot_updates */) { }
 174  187  };
 175  188  
 176  189  class RefineCardTableEntryClosure;
      190 +
 177  191  class G1CollectedHeap : public SharedHeap {
 178  192    friend class VM_G1CollectForAllocation;
 179  193    friend class VM_GenCollectForPermanentAllocation;
 180  194    friend class VM_G1CollectFull;
 181  195    friend class VM_G1IncCollectionPause;
 182  196    friend class VMStructs;
 183  197    friend class MutatorAllocRegion;
 184  198    friend class SurvivorGCAllocRegion;
 185  199    friend class OldGCAllocRegion;
 186  200  
... 379 lines elided ...
 566  580    // This function does everything necessary/possible to satisfy a
 567  581    // failed allocation request (including collection, expansion, etc.)
 568  582    HeapWord* satisfy_failed_allocation(size_t word_size, bool* succeeded);
 569  583  
 570  584    // Attempting to expand the heap sufficiently
 571  585    // to support an allocation of the given "word_size".  If
 572  586    // successful, perform the allocation and return the address of the
 573  587    // allocated block, or else "NULL".
 574  588    HeapWord* expand_and_allocate(size_t word_size);
 575  589  
      590 +  // Process any reference objects discovered during
      591 +  // an incremental evacuation pause.
      592 +  void process_discovered_references();
      593 +
      594 +  // Enqueue any remaining discovered references
      595 +  // after processing.
      596 +  void enqueue_discovered_references();
      597 +
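These two calls are expected to bracket the reference-processing phase near the end of an incremental evacuation pause. A hedged sketch of the intended call order (the surrounding pause code is elided):

    // Process references discovered by _ref_processor_stw during the
    // pause, copying any still-live referents to to-space ...
    process_discovered_references();
    // ... then, once per-thread state has been flushed, push whatever
    // remains onto the pending list for the Java-level handler.
    enqueue_discovered_references();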
 576  598  public:
 577  599  
 578  600    G1MonitoringSupport* g1mm() { return _g1mm; }
 579  601  
 580  602    // Expand the garbage-first heap by at least the given size (in bytes!).
 581  603    // Returns true if the heap was expanded by the requested amount;
 582  604    // false otherwise.
 583  605    // (Rounds up to a HeapRegion boundary.)
 584  606    bool expand(size_t expand_bytes);
 585  607  
... 232 lines elided ...
 818  840    // objects.
 819  841    void init_for_evac_failure(OopsInHeapRegionClosure* cl);
 820  842    // Do any necessary cleanup for evacuation-failure handling data
 821  843    // structures.
 822  844    void finalize_for_evac_failure();
 823  845  
 824  846    // An attempt to evacuate "obj" has failed; take necessary steps.
 825  847    oop handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, oop obj);
 826  848    void handle_evacuation_failure_common(oop obj, markOop m);
 827  849  
 828      -  // Instance of the concurrent mark is_alive closure for embedding
 829      -  // into the reference processor as the is_alive_non_header. This
 830      -  // prevents unnecessary additions to the discovered lists during
 831      -  // concurrent discovery.
 832      -  G1CMIsAliveClosure _is_alive_closure;
      850 +  // ("Weak") Reference processing support.
      851 +  //
       852 +  // G1 has two instances of the reference processor class. One
      853 +  // (_ref_processor_cm) handles reference object discovery
      854 +  // and subsequent processing during concurrent marking cycles.
      855 +  //
      856 +  // The other (_ref_processor_stw) handles reference object
      857 +  // discovery and processing during full GCs and incremental
      858 +  // evacuation pauses.
      859 +  //
      860 +  // During an incremental pause, reference discovery will be
      861 +  // temporarily disabled for _ref_processor_cm and will be
      862 +  // enabled for _ref_processor_stw. At the end of the evacuation
       863 +  // pause, references discovered by _ref_processor_stw will be
       864 +  // processed and discovery will be disabled. The previous
       865 +  // setting for reference object discovery for _ref_processor_cm
       866 +  // will be reinstated.
      867 +  //
      868 +  // At the start of marking:
      869 +  //  * Discovery by the CM ref processor is verified to be inactive
       870 +  //    and its discovered lists are empty.
      871 +  //  * Discovery by the CM ref processor is then enabled.
      872 +  //
      873 +  // At the end of marking:
      874 +  //  * Any references on the CM ref processor's discovered
      875 +  //    lists are processed (possibly MT).
      876 +  //
      877 +  // At the start of full GC we:
      878 +  //  * Disable discovery by the CM ref processor and
      879 +  //    empty CM ref processor's discovered lists
      880 +  //    (without processing any entries).
       881 +  //  * Verify that the STW ref processor is inactive and its
      882 +  //    discovered lists are empty.
      883 +  //  * Temporarily set STW ref processor discovery as single threaded.
      884 +  //  * Temporarily clear the STW ref processor's _is_alive_non_header
      885 +  //    field.
      886 +  //  * Finally enable discovery by the STW ref processor.
      887 +  //
      888 +  // The STW ref processor is used to record any discovered
      889 +  // references during the full GC.
      890 +  //
      891 +  // At the end of a full GC we:
      892 +  //  * Enqueue any reference objects discovered by the STW ref processor
      893 +  //    that have non-live referents. This has the side-effect of
      894 +  //    making the STW ref processor inactive by disabling discovery.
      895 +  //  * Verify that the CM ref processor is still inactive
       896 +  //    and no references have been placed on its discovered
      897 +  //    lists (also checked as a precondition during initial marking).
      898 +
       899 +  // The (STW) reference processor...
      900 +  ReferenceProcessor* _ref_processor_stw;
      901 +
      902 +  // During reference object discovery, the _is_alive_non_header
      903 +  // closure (if non-null) is applied to the referent object to
      904 +  // determine whether the referent is live. If so then the
      905 +  // reference object does not need to be 'discovered' and can
      906 +  // be treated as a regular oop. This has the benefit of reducing
      907 +  // the number of 'discovered' reference objects that need to
      908 +  // be processed.
      909 +  //
      910 +  // Instance of the is_alive closure for embedding into the
      911 +  // STW reference processor as the _is_alive_non_header field.
      912 +  // Supplying a value for the _is_alive_non_header field is
      913 +  // optional but doing so prevents unnecessary additions to
      914 +  // the discovered lists during reference discovery.
      915 +  G1STWIsAliveClosure _is_alive_closure_stw;
 833  916  
 834      -  // ("Weak") Reference processing support
 835      -  ReferenceProcessor* _ref_processor;
      917 +  // The (concurrent marking) reference processor...
      918 +  ReferenceProcessor* _ref_processor_cm;
      919 +
      920 +  // Instance of the concurrent mark is_alive closure for embedding
      921 +  // into the Concurrent Marking reference processor as the
      922 +  // _is_alive_non_header field. Supplying a value for the
      923 +  // _is_alive_non_header field is optional but doing so prevents
      924 +  // unnecessary additions to the discovered lists during reference
      925 +  // discovery.
      926 +  G1CMIsAliveClosure _is_alive_closure_cm;
 836  927  
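A hedged sketch of the discovery hand-off described in the comment block above, as it would run around an incremental evacuation pause. The enable/disable entry points below are the plain ReferenceProcessor accessors; any verification arguments they may take are omitted here as an assumption:

    ReferenceProcessor* rp_cm  = ref_processor_cm();
    ReferenceProcessor* rp_stw = ref_processor_stw();

    // Park concurrent-marking discovery and let the STW processor
    // take over for the duration of the pause.
    bool cm_was_discovering = rp_cm->discovery_enabled();
    rp_cm->disable_discovery();
    rp_stw->enable_discovery();

    // ... evacuate the collection set; Reference objects encountered
    //     along the way are discovered by _ref_processor_stw ...

    process_discovered_references();   // process STW discoveries
    enqueue_discovered_references();   // side effect: disables STW discovery

    // Reinstate the previous CM discovery setting.
    if (cm_was_discovering) {
      rp_cm->enable_discovery();
    }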
 837  928    enum G1H_process_strong_roots_tasks {
 838  929      G1H_PS_mark_stack_oops_do,
 839  930      G1H_PS_refProcessor_oops_do,
 840  931      // Leave this one last.
 841  932      G1H_PS_NumElements
 842  933    };
 843  934  
 844  935    SubTasksDone* _process_strong_tasks;
 845  936  
... 20 lines elided ...
 866  957    // Create a G1CollectedHeap with the specified policy.
 867  958    // Must call the initialize method afterwards.
 868  959    // May not return if something goes wrong.
 869  960    G1CollectedHeap(G1CollectorPolicy* policy);
 870  961  
 871  962    // Initialize the G1CollectedHeap to have the initial and
 872  963    // maximum sizes, permanent generation, and remembered and barrier sets
 873  964    // specified by the policy object.
 874  965    jint initialize();
 875  966  
      967 +  // Initialize weak reference processing.
 876  968    virtual void ref_processing_init();
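This hunk only adds the comment; the body of ref_processing_init() lives in g1CollectedHeap.cpp. A hedged sketch of the setup it implies, one ReferenceProcessor per comment block above, with each argument annotated. The exact constructor parameter list is an assumption, not shown in this change:

    void G1CollectedHeap::ref_processing_init() {
      SharedHeap::ref_processing_init();
      MemRegion mr = reserved_region();

      // CM processor: discovery runs concurrently with mutators, so it
      // is not atomic and the discovered lists need a barrier.
      _ref_processor_cm =
        new ReferenceProcessor(mr,                      // span: entire heap
                               ParallelRefProcEnabled && (ParallelGCThreads > 1),
                                                        // MT processing?
                               (int) ParallelGCThreads, // MT processing degree
                               (ParallelGCThreads > 1) || (ConcGCThreads > 1),
                                                        // MT discovery?
                               (int) MAX2(ParallelGCThreads, ConcGCThreads),
                                                        // MT discovery degree
                               false,                   // discovery is not atomic
                               &_is_alive_closure_cm,   // is_alive_non_header
                               true);                   // discovered lists need barrier

      // STW processor: discovery happens inside a safepoint, so it is
      // atomic and no barrier is needed on the discovered lists.
      _ref_processor_stw =
        new ReferenceProcessor(mr,
                               ParallelRefProcEnabled && (ParallelGCThreads > 1),
                               (int) ParallelGCThreads,
                               (ParallelGCThreads > 1), // MT discovery?
                               (int) ParallelGCThreads,
                               true,                    // discovery is atomic
                               &_is_alive_closure_stw,  // is_alive_non_header
                               false);                  // no barrier needed
    }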
 877  969  
 878  970    void set_par_threads(int t) {
 879  971      SharedHeap::set_par_threads(t);
 880  972      _process_strong_tasks->set_n_threads(t);
 881  973    }
 882  974  
 883  975    virtual CollectedHeap::Name kind() const {
 884  976      return CollectedHeap::G1CollectedHeap;
 885  977    }
... 31 lines elided ...
 917 1009      OrderAccess::fence();
 918 1010    }
 919 1011  
 920 1012    void iterate_dirty_card_closure(CardTableEntryClosure* cl,
 921 1013                                    DirtyCardQueue* into_cset_dcq,
 922 1014                                    bool concurrent, int worker_i);
 923 1015  
 924 1016    // The shared block offset table array.
 925 1017    G1BlockOffsetSharedArray* bot_shared() const { return _bot_shared; }
 926 1018  
 927      -  // Reference Processing accessor
 928      -  ReferenceProcessor* ref_processor() { return _ref_processor; }
     1019 +  // Reference Processing accessors
     1020 +
      1021 +  // The STW reference processor...
     1022 +  ReferenceProcessor* ref_processor_stw() const { return _ref_processor_stw; }
     1023 +
      1024 +  // The Concurrent Marking reference processor...
     1025 +  ReferenceProcessor* ref_processor_cm() const { return _ref_processor_cm; }
 929 1026  
 930 1027    virtual size_t capacity() const;
 931 1028    virtual size_t used() const;
 932 1029    // This should be called when we're not holding the heap lock. The
 933 1030    // result might be a bit inaccurate.
 934 1031    size_t used_unlocked() const;
 935 1032    size_t recalculate_used() const;
 936 1033  
 937 1034    // These virtual functions do the actual allocation.
 938 1035    // Some heaps may offer a contiguous region for shared non-blocking
... 1039 lines elided ...