Print this page
rev 2585 : [mq]: g1-reference-processing

Split Close
Expand all
Collapse all
          --- old/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
          +++ new/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
↓ open down ↓ 147 lines elided ↑ open up ↑
 148  148  
// Alloc region used to satisfy allocation requests made by mutator
// (application) threads. BOT ("block offset table") updates are
// disabled for these regions -- see the bot_updates flag passed to
// the G1AllocRegion base-class constructor below.
class MutatorAllocRegion : public G1AllocRegion {
protected:
  // Allocate a new heap region able to hold word_size words for this
  // alloc region to dispense from; `force` presumably allows the
  // allocation to bypass the usual region-count limits -- TODO
  // confirm against the G1AllocRegion declaration.
  virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
  // Retire the given active region, recording the number of bytes
  // actually allocated in it.
  virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
public:
  MutatorAllocRegion()
    : G1AllocRegion("Mutator Alloc Region", false /* bot_updates */) { }
};
 157  157  
// The G1 STW (stop-the-world) is-alive closure.
// An instance is embedded into the G1CH and used as the
// _is_alive_non_header closure in the STW reference
// processor. It is also extensively used during reference
// processing during STW evacuation pauses.
class G1STWIsAliveClosure: public BoolObjectClosure {
  // Back pointer to the heap; used by do_object_b() (defined in the
  // corresponding .cpp file) to answer the liveness query.
  G1CollectedHeap* _g1;
public:
  G1STWIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
  // This closure is only ever consulted as a boolean predicate;
  // do_object() must never be called.
  void do_object(oop p) { assert(false, "Do not call."); }
  // Returns whether p should be treated as live during a STW pause.
  bool do_object_b(oop p);
};
      170 +
 158  171  class RefineCardTableEntryClosure;
      172 +
 159  173  class G1CollectedHeap : public SharedHeap {
 160  174    friend class VM_G1CollectForAllocation;
 161  175    friend class VM_GenCollectForPermanentAllocation;
 162  176    friend class VM_G1CollectFull;
 163  177    friend class VM_G1IncCollectionPause;
 164  178    friend class VMStructs;
 165  179    friend class MutatorAllocRegion;
 166  180  
 167  181    // Closures used in implementation.
 168  182    friend class G1ParCopyHelper;
↓ open down ↓ 390 lines elided ↑ open up ↑
 559  573    // This function does everything necessary/possible to satisfy a
 560  574    // failed allocation request (including collection, expansion, etc.)
 561  575    HeapWord* satisfy_failed_allocation(size_t word_size, bool* succeeded);
 562  576  
 563  577    // Attempting to expand the heap sufficiently
 564  578    // to support an allocation of the given "word_size".  If
 565  579    // successful, perform the allocation and return the address of the
 566  580    // allocated block, or else "NULL".
 567  581    HeapWord* expand_and_allocate(size_t word_size);
 568  582  
      583 +  // Process any reference objects discovered during
      584 +  // an incremental evacuation pause.
      585 +  void process_discovered_references();
      586 +
      587 +  // Enqueue any remaining discovered references
      588 +  // after processing.
      589 +  void enqueue_discovered_references();
      590 +
 569  591  public:
 570  592  
 571  593    G1MonitoringSupport* g1mm() { return _g1mm; }
 572  594  
 573  595    // Expand the garbage-first heap by at least the given size (in bytes!).
 574  596    // Returns true if the heap was expanded by the requested amount;
 575  597    // false otherwise.
 576  598    // (Rounds up to a HeapRegion boundary.)
 577  599    bool expand(size_t expand_bytes);
 578  600  
↓ open down ↓ 253 lines elided ↑ open up ↑
 832  854    // case. Said regions are kept in the _retained_gc_alloc_regions[]
 833  855    // array. If the parameter totally is set, we will not retain any
 834  856    // regions, irrespective of what _retain_gc_alloc_region[]
 835  857    // indicates.
 836  858    void release_gc_alloc_regions(bool totally);
 837  859  #ifndef PRODUCT
 838  860    // Useful for debugging.
 839  861    void print_gc_alloc_regions();
 840  862  #endif // !PRODUCT
 841  863  
 842      -  // Instance of the concurrent mark is_alive closure for embedding
 843      -  // into the reference processor as the is_alive_non_header. This
 844      -  // prevents unnecessary additions to the discovered lists during
 845      -  // concurrent discovery.
 846      -  G1CMIsAliveClosure _is_alive_closure;
      864 +  // ("Weak") Reference processing support.
      865 +  //
       866 +  // G1 has 2 instances of the reference processor class. One
      867 +  // (_ref_processor_cm) handles reference object discovery
      868 +  // and subsequent processing during concurrent marking cycles.
      869 +  //
      870 +  // The other (_ref_processor_stw) handles reference object
      871 +  // discovery and processing during full GCs and incremental
      872 +  // evacuation pauses.
      873 +  //
      874 +  // During an incremental pause, reference discovery will be
      875 +  // temporarily disabled for _ref_processor_cm and will be
      876 +  // enabled for _ref_processor_stw. At the end of the evacuation
      877 +  // pause references discovered by _ref_processor_stw will be
      878 +  // processed and discovery will be disabled. The previous
      879 +  // setting for reference object discovery for _ref_processor_cm
      880 +  // will be re-instated.
      881 +  //
      882 +  // At the start of marking:
      883 +  //  * Discovery by the CM ref processor is verified to be inactive
       884 +  //    and its discovered lists are empty.
      885 +  //  * Discovery by the CM ref processor is then enabled.
      886 +  //
      887 +  // At the end of marking:
      888 +  //  * Any references on the CM ref processor's discovered
      889 +  //    lists are processed (possibly MT).
      890 +  //
      891 +  // At the start of full GC we:
      892 +  //  * Disable discovery by the CM ref processor and
      893 +  //    empty CM ref processor's discovered lists
      894 +  //    (without processing any entries).
       895 +  //  * Verify that the STW ref processor is inactive and its
      896 +  //    discovered lists are empty.
      897 +  //  * Temporarily set STW ref processor discovery as single threaded.
      898 +  //  * Temporarily clear the STW ref processor's _is_alive_non_header
      899 +  //    field.
      900 +  //  * Finally enable discovery by the STW ref processor.
      901 +  //
      902 +  // The STW ref processor is used to record any discovered
      903 +  // references during the full GC.
      904 +  //
      905 +  // At the end of a full GC we:
      906 +  //  * Will enqueue any non-live discovered references on the
      907 +  //    STW ref processor's discovered lists. This makes the
      908 +  //    STW ref processor inactive by disabling discovery.
      909 +  //  * Verify that the CM ref processor is still inactive
       910 +  //    and no references have been placed on its discovered
      911 +  //    lists (also checked as a precondition during initial marking).
      912 +
      913 +  // The (stw) reference processor...
      914 +  ReferenceProcessor* _ref_processor_stw;
      915 +
      916 +  // Instance of the is_alive closure for embedding into the
      917 +  // STW reference processor as the _is_alive_non_header field.
      918 +  // The _is_alive_non_header prevents unnecessary additions to
      919 +  // the discovered lists during reference discovery.
      920 +  G1STWIsAliveClosure _is_alive_closure_stw;
 847  921  
 848      -  // ("Weak") Reference processing support
 849      -  ReferenceProcessor* _ref_processor;
      922 +  // The (concurrent marking) reference processor...
      923 +  ReferenceProcessor* _ref_processor_cm;
      924 +
      925 +  // Instance of the concurrent mark is_alive closure for embedding
      926 +  // into the Concurrent Marking reference processor as the 
      927 +  // _is_alive_non_header field. The _is_alive_non_header
      928 +  // prevents unnecessary additions to the discovered lists
      929 +  // during concurrent discovery.
      930 +  G1CMIsAliveClosure _is_alive_closure_cm;
 850  931  
 851  932    enum G1H_process_strong_roots_tasks {
 852  933      G1H_PS_mark_stack_oops_do,
 853  934      G1H_PS_refProcessor_oops_do,
 854  935      // Leave this one last.
 855  936      G1H_PS_NumElements
 856  937    };
 857  938  
 858  939    SubTasksDone* _process_strong_tasks;
 859  940  
↓ open down ↓ 20 lines elided ↑ open up ↑
 880  961    // Create a G1CollectedHeap with the specified policy.
 881  962    // Must call the initialize method afterwards.
 882  963    // May not return if something goes wrong.
 883  964    G1CollectedHeap(G1CollectorPolicy* policy);
 884  965  
 885  966    // Initialize the G1CollectedHeap to have the initial and
 886  967    // maximum sizes, permanent generation, and remembered and barrier sets
 887  968    // specified by the policy object.
 888  969    jint initialize();
 889  970  
      971 +  // Initialize weak reference processing.
 890  972    virtual void ref_processing_init();
 891  973  
 892  974    void set_par_threads(int t) {
 893  975      SharedHeap::set_par_threads(t);
 894  976      _process_strong_tasks->set_n_threads(t);
 895  977    }
 896  978  
 897  979    virtual CollectedHeap::Name kind() const {
 898  980      return CollectedHeap::G1CollectedHeap;
 899  981    }
↓ open down ↓ 31 lines elided ↑ open up ↑
 931 1013      OrderAccess::fence();
 932 1014    }
 933 1015  
 934 1016    void iterate_dirty_card_closure(CardTableEntryClosure* cl,
 935 1017                                    DirtyCardQueue* into_cset_dcq,
 936 1018                                    bool concurrent, int worker_i);
 937 1019  
 938 1020    // The shared block offset table array.
 939 1021    G1BlockOffsetSharedArray* bot_shared() const { return _bot_shared; }
 940 1022  
 941      -  // Reference Processing accessor
 942      -  ReferenceProcessor* ref_processor() { return _ref_processor; }
     1023 +  // Reference Processing accessors
     1024 +
     1025 +  // The STW reference processor....
     1026 +  ReferenceProcessor* ref_processor_stw() const { return _ref_processor_stw; }
     1027 +
      1028 +  // The Concurrent Marking reference processor...
     1029 +  ReferenceProcessor* ref_processor_cm() const { return _ref_processor_cm; }
 943 1030  
 944 1031    virtual size_t capacity() const;
 945 1032    virtual size_t used() const;
 946 1033    // This should be called when we're not holding the heap lock. The
 947 1034    // result might be a bit inaccurate.
 948 1035    size_t used_unlocked() const;
 949 1036    size_t recalculate_used() const;
 950 1037  #ifndef PRODUCT
 951 1038    size_t recalculate_used_regions() const;
 952 1039  #endif // PRODUCT
↓ open down ↓ 1058 lines elided ↑ open up ↑
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX