
src/share/vm/gc/g1/g1CollectedHeap.hpp

rev 8817 : [mq]: jon-review-statistics


  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_GC_G1_G1COLLECTEDHEAP_HPP
  26 #define SHARE_VM_GC_G1_G1COLLECTEDHEAP_HPP
  27 
  28 #include "gc/g1/concurrentMark.hpp"
  29 #include "gc/g1/evacuationInfo.hpp"
  30 #include "gc/g1/g1AllocationContext.hpp"
  31 #include "gc/g1/g1Allocator.hpp"
  32 #include "gc/g1/g1BiasedArray.hpp"
  33 #include "gc/g1/g1CollectorState.hpp"
  34 #include "gc/g1/g1HRPrinter.hpp"
  35 #include "gc/g1/g1InCSetState.hpp"
  36 #include "gc/g1/g1MonitoringSupport.hpp"

  37 #include "gc/g1/g1SATBCardTableModRefBS.hpp"
  38 #include "gc/g1/g1YCTypes.hpp"
  39 #include "gc/g1/hSpaceCounters.hpp"
  40 #include "gc/g1/heapRegionManager.hpp"
  41 #include "gc/g1/heapRegionSet.hpp"
  42 #include "gc/shared/barrierSet.hpp"
  43 #include "gc/shared/collectedHeap.hpp"
  44 #include "memory/memRegion.hpp"
  45 #include "utilities/stack.hpp"
  46 
  47 // A "G1CollectedHeap" is an implementation of a java heap for HotSpot.
  48 // It uses the "Garbage First" heap organization and algorithm, which
  49 // may combine concurrent marking with parallel, incremental compaction of
  50 // heap subsets that will yield large amounts of garbage.
  51 
  52 // Forward declarations
  53 class HeapRegion;
  54 class HRRSCleanupTask;
  55 class GenerationSpec;
  56 class OopsInHeapRegionClosure;


 167   G1STWIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
 168   bool do_object_b(oop p);
 169 };
 170 
 171 class RefineCardTableEntryClosure;
 172 
 173 class G1RegionMappingChangedListener : public G1MappingChangedListener {
 174  private:
 175   void reset_from_card_cache(uint start_idx, size_t num_regions);
 176  public:
 177   virtual void on_commit(uint start_idx, size_t num_regions, bool zero_filled);
 178 };
 179 
 180 class G1CollectedHeap : public CollectedHeap {
 181   friend class VM_CollectForMetadataAllocation;
 182   friend class VM_G1CollectForAllocation;
 183   friend class VM_G1CollectFull;
 184   friend class VM_G1IncCollectionPause;
 185   friend class VMStructs;
 186   friend class MutatorAllocRegion;
 187   friend class SurvivorGCAllocRegion;
 188   friend class OldGCAllocRegion;
 189 
 190   // Closures used in implementation.
 191   friend class G1ParScanThreadState;
 192   friend class G1ParTask;
 193   friend class G1PLABAllocator;
 194   friend class G1PrepareCompactClosure;
 195 
 196   // Other related classes.
 197   friend class HeapRegionClaimer;
 198 
 199   // Testing classes.
 200   friend class G1CheckCSetFastTableClosure;
 201 
 202 private:
 203   FlexibleWorkGang* _workers;
 204 
 205   static size_t _humongous_object_threshold_in_words;
 206 
 207   // The secondary free list which contains regions that have been
 208   // freed up during the cleanup process. This will be appended to


 228   // only exception is the humongous set which we leave unaltered. If
 229   // free_list_only is true, it will only tear down the master free
 230   // list. It is called before a Full GC (free_list_only == false) or
 231   // before heap shrinking (free_list_only == true).
 232   void tear_down_region_sets(bool free_list_only);
 233 
 234   // Rebuilds the region sets / lists so that they are repopulated to
 235   // reflect the contents of the heap. The only exception is the
 236   // humongous set which was not torn down in the first place. If
 237   // free_list_only is true, it will only rebuild the master free
 238   // list. It is called after a Full GC (free_list_only == false) or
 239   // after heap shrinking (free_list_only == true).
 240   void rebuild_region_sets(bool free_list_only);
 241 
 242   // Callback for region mapping changed events.
 243   G1RegionMappingChangedListener _listener;
 244 
 245   // The sequence of all heap regions in the heap.
 246   HeapRegionManager _hrm;
 247 
 248   // Handles non-humongous allocations in the G1CollectedHeap.
 249   G1Allocator* _allocator;
 250 
 251   // Outside of GC pauses, the number of bytes used in all regions other
 252   // than the current allocation region(s).
 253   size_t _summary_bytes_used;
 254 
 255   void increase_used(size_t bytes);
 256   void decrease_used(size_t bytes);
 257 
 258   void set_used(size_t bytes);
 259 
 260   // Class that handles archive allocation ranges.
 261   G1ArchiveAllocator* _archive_allocator;
 262 
 263   // Statistics for each allocation context
 264   AllocationContextStats _allocation_context_stats;
 265 
 266   // PLAB sizing policy for survivors.
 267   PLABStats _survivor_plab_stats;
 268 
 269   // PLAB sizing policy for tenured objects.
 270   PLABStats _old_plab_stats;
 271 
 272   // It specifies whether we should attempt to expand the heap after a
 273   // region allocation failure. If heap expansion fails we set this to
 274   // false so that we don't re-attempt the heap expansion (it's likely
 275   // that subsequent expansion attempts will also fail if one fails).
 276   // Currently, it is only consulted during GC and it's reset at the
 277   // start of each GC.
 278   bool _expand_heap_after_alloc_failure;
 279 
 280   // Helper for monitoring and management support.
 281   G1MonitoringSupport* _g1mm;
 282 
 283   // Records whether the region at the given index is (still) a
 284   // candidate for eager reclaim.  Only valid for humongous start
 285   // regions; other regions have unspecified values.  Humongous start
 286   // regions are initialized at start of collection pause, with
 287   // candidates removed from the set as they are found reachable from
 288   // roots or the young generation.
 289   class HumongousReclaimCandidates : public G1BiasedMappedArray<bool> {
 290    protected:


 589 
 590 public:
 591   FlexibleWorkGang* workers() const { return _workers; }
 592 
 593   G1Allocator* allocator() {
 594     return _allocator;
 595   }
 596 
 597   G1MonitoringSupport* g1mm() {
 598     assert(_g1mm != NULL, "should have been initialized");
 599     return _g1mm;
 600   }
 601 
 602   // Expand the garbage-first heap by at least the given size (in bytes!).
 603   // Returns true if the heap was expanded by the requested amount;
 604   // false otherwise.
 605   // (Rounds up to a HeapRegion boundary.)
 606   bool expand(size_t expand_bytes);
 607 
 608   // Returns the PLAB statistics for a given destination.
 609   inline PLABStats* alloc_buffer_stats(InCSetState dest);
 610 
 611   // Determines PLAB size for a given destination.
 612   inline size_t desired_plab_sz(InCSetState dest);
 613 
 614   inline AllocationContextStats& allocation_context_stats();
 615 
 616   // Do anything common to GC's.
 617   void gc_prologue(bool full);
 618   void gc_epilogue(bool full);
 619 
 620   // Modify the reclaim candidate set and test for presence.
 621   // These are only valid for starts_humongous regions.
 622   inline void set_humongous_reclaim_candidate(uint region, bool value);
 623   inline bool is_humongous_reclaim_candidate(uint region);
 624 
 625   // Remove from the reclaim candidate set.  Also remove from the
 626   // collection set so that later encounters avoid the slow path.
 627   inline void set_humongous_is_live(oop obj);
 628 
 629   // Register the given region to be part of the collection set.


 772   // Heap_lock when we enter this method, we will pass the
 773   // gc_count_before (i.e., total_collections()) as a parameter since
 774   // it has to be read while holding the Heap_lock. Currently, both
 775   // methods that call do_collection_pause() release the Heap_lock
 776   // before the call, so it's easy to read gc_count_before just before.
 777   HeapWord* do_collection_pause(size_t         word_size,
 778                                 uint           gc_count_before,
 779                                 bool*          succeeded,
 780                                 GCCause::Cause gc_cause);
 781 
 782   void wait_for_root_region_scanning();
 783 
 784   // The guts of the incremental collection pause, executed by the vm
 785   // thread. It returns false if it is unable to do the collection due
 786   // to the GC locker being active, true otherwise
 787   bool do_collection_pause_at_safepoint(double target_pause_time_ms);
 788 
 789   // Actually do the work of evacuating the collection set.
 790   void evacuate_collection_set(EvacuationInfo& evacuation_info);
 791 



 792   // The g1 remembered set of the heap.
 793   G1RemSet* _g1_rem_set;
 794 
 795   // A set of cards that cover the objects for which the Rsets should be updated
 796   // concurrently after the collection.
 797   DirtyCardQueueSet _dirty_card_queue_set;
 798 
 799   // The closure used to refine a single card.
 800   RefineCardTableEntryClosure* _refine_cte_cl;
 801 
 802   // A DirtyCardQueueSet that is used to hold cards that contain
 803   // references into the current collection set. This is used to
 804   // update the remembered sets of the regions in the collection
 805   // set in the event of an evacuation failure.
 806   DirtyCardQueueSet _into_cset_dirty_card_queue_set;
 807 
 808   // After a collection pause, make the regions in the CS into free
 809   // regions.
 810   void free_collection_set(HeapRegion* cs_head, EvacuationInfo& evacuation_info);
 811 




  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_GC_G1_G1COLLECTEDHEAP_HPP
  26 #define SHARE_VM_GC_G1_G1COLLECTEDHEAP_HPP
  27 
  28 #include "gc/g1/concurrentMark.hpp"
  29 #include "gc/g1/evacuationInfo.hpp"
  30 #include "gc/g1/g1AllocationContext.hpp"
  31 #include "gc/g1/g1Allocator.hpp"
  32 #include "gc/g1/g1BiasedArray.hpp"
  33 #include "gc/g1/g1CollectorState.hpp"
  34 #include "gc/g1/g1HRPrinter.hpp"
  35 #include "gc/g1/g1InCSetState.hpp"
  36 #include "gc/g1/g1MonitoringSupport.hpp"
  37 #include "gc/g1/g1EvacStats.hpp"
  38 #include "gc/g1/g1SATBCardTableModRefBS.hpp"
  39 #include "gc/g1/g1YCTypes.hpp"
  40 #include "gc/g1/hSpaceCounters.hpp"
  41 #include "gc/g1/heapRegionManager.hpp"
  42 #include "gc/g1/heapRegionSet.hpp"
  43 #include "gc/shared/barrierSet.hpp"
  44 #include "gc/shared/collectedHeap.hpp"
  45 #include "memory/memRegion.hpp"
  46 #include "utilities/stack.hpp"
  47 
  48 // A "G1CollectedHeap" is an implementation of a java heap for HotSpot.
  49 // It uses the "Garbage First" heap organization and algorithm, which
  50 // may combine concurrent marking with parallel, incremental compaction of
  51 // heap subsets that will yield large amounts of garbage.
  52 
  53 // Forward declarations
  54 class HeapRegion;
  55 class HRRSCleanupTask;
  56 class GenerationSpec;
  57 class OopsInHeapRegionClosure;


 168   G1STWIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
 169   bool do_object_b(oop p);
 170 };
 171 
 172 class RefineCardTableEntryClosure;
 173 
 174 class G1RegionMappingChangedListener : public G1MappingChangedListener {
 175  private:
 176   void reset_from_card_cache(uint start_idx, size_t num_regions);
 177  public:
 178   virtual void on_commit(uint start_idx, size_t num_regions, bool zero_filled);
 179 };
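
For reference, the on_commit() callback above presumably just forwards to reset_from_card_cache() for the newly committed range; a minimal sketch (the body shown here is illustrative, the real one lives in g1CollectedHeap.cpp):

  void G1RegionMappingChangedListener::on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
    // The from-card cache is only valid for committed regions, so clear the
    // entries for every region that has just been (re)committed.
    reset_from_card_cache(start_idx, num_regions);
  }
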
 180 
 181 class G1CollectedHeap : public CollectedHeap {
 182   friend class VM_CollectForMetadataAllocation;
 183   friend class VM_G1CollectForAllocation;
 184   friend class VM_G1CollectFull;
 185   friend class VM_G1IncCollectionPause;
 186   friend class VMStructs;
 187   friend class MutatorAllocRegion;
 188   friend class G1GCAllocRegion;

 189 
 190   // Closures used in implementation.
 191   friend class G1ParScanThreadState;
 192   friend class G1ParTask;
 193   friend class G1PLABAllocator;
 194   friend class G1PrepareCompactClosure;
 195 
 196   // Other related classes.
 197   friend class HeapRegionClaimer;
 198 
 199   // Testing classes.
 200   friend class G1CheckCSetFastTableClosure;
 201 
 202 private:
 203   FlexibleWorkGang* _workers;
 204 
 205   static size_t _humongous_object_threshold_in_words;
 206 
 207   // The secondary free list which contains regions that have been
 208   // freed up during the cleanup process. This will be appended to


 228   // only exception is the humongous set which we leave unaltered. If
 229   // free_list_only is true, it will only tear down the master free
 230   // list. It is called before a Full GC (free_list_only == false) or
 231   // before heap shrinking (free_list_only == true).
 232   void tear_down_region_sets(bool free_list_only);
 233 
 234   // Rebuilds the region sets / lists so that they are repopulated to
 235   // reflect the contents of the heap. The only exception is the
 236   // humongous set which was not torn down in the first place. If
 237   // free_list_only is true, it will only rebuild the master free
 238   // list. It is called after a Full GC (free_list_only == false) or
 239   // after heap shrinking (free_list_only == true).
 240   void rebuild_region_sets(bool free_list_only);
 241 
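
The tear_down/rebuild pair above is meant to bracket the operations named in the comments; a simplified usage sketch (both methods are private, so the enclosing function names here are illustrative and not the actual Full GC / shrink code):

  void G1CollectedHeap::full_collection_sketch() {
    tear_down_region_sets(false /* free_list_only */);  // before the Full GC
    // ... compact the heap; region contents and ownership change ...
    rebuild_region_sets(false /* free_list_only */);    // after the Full GC
  }

  void G1CollectedHeap::shrink_sketch() {
    tear_down_region_sets(true /* free_list_only */);   // only the master free list
    // ... uncommit regions from the end of the heap ...
    rebuild_region_sets(true /* free_list_only */);
  }
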
 242   // Callback for region mapping changed events.
 243   G1RegionMappingChangedListener _listener;
 244 
 245   // The sequence of all heap regions in the heap.
 246   HeapRegionManager _hrm;
 247 
 248   // Manages all allocations with regions except humongous object allocations.
 249   G1Allocator* _allocator;
 250 
 251   // Outside of GC pauses, the number of bytes used in all regions other
 252   // than the current allocation region(s).
 253   size_t _summary_bytes_used;
 254 
 255   void increase_used(size_t bytes);
 256   void decrease_used(size_t bytes);
 257 
 258   void set_used(size_t bytes);
 259 
 260   // Class that handles archive allocation ranges.
 261   G1ArchiveAllocator* _archive_allocator;
 262 
 263   // Statistics for each allocation context
 264   AllocationContextStats _allocation_context_stats;
 265 
 266   // GC allocation statistics policy for survivors.
 267   G1EvacStats _survivor_evac_stats;
 268 
 269   // GC allocation statistics policy for tenured objects.
 270   G1EvacStats _old_evac_stats;
 271 
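
This change swaps the two PLABStats members for G1EvacStats (note the new g1EvacStats.hpp include above). As a rough mental model only, such a specialization could extend PLABStats with G1-specific waste counters; every name below, including the base-class constructor arguments, is illustrative and not taken from the actual g1EvacStats.hpp:

  class ExampleEvacStats : public PLABStats {
    size_t _region_end_waste;   // words left unusable at region ends
    size_t _regions_filled;     // regions completely filled by PLABs
    size_t _direct_allocated;   // words allocated outside any PLAB
   public:
    ExampleEvacStats(size_t default_plab_sz, unsigned wt)
      : PLABStats(default_plab_sz, wt),
        _region_end_waste(0), _regions_filled(0), _direct_allocated(0) { }

    void add_direct_allocated(size_t words) { _direct_allocated += words; }
    void add_region_end_waste(size_t words) { _region_end_waste += words; _regions_filled++; }
  };
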
 272   // It specifies whether we should attempt to expand the heap after a
 273   // region allocation failure. If heap expansion fails we set this to
 274   // false so that we don't re-attempt the heap expansion (it's likely
 275   // that subsequent expansion attempts will also fail if one fails).
 276   // Currently, it is only consulted during GC and it's reset at the
 277   // start of each GC.
 278   bool _expand_heap_after_alloc_failure;
 279 
 280   // Helper for monitoring and management support.
 281   G1MonitoringSupport* _g1mm;
 282 
 283   // Records whether the region at the given index is (still) a
 284   // candidate for eager reclaim.  Only valid for humongous start
 285   // regions; other regions have unspecified values.  Humongous start
 286   // regions are initialized at start of collection pause, with
 287   // candidates removed from the set as they are found reachable from
 288   // roots or the young generation.
 289   class HumongousReclaimCandidates : public G1BiasedMappedArray<bool> {
 290    protected:


 589 
 590 public:
 591   FlexibleWorkGang* workers() const { return _workers; }
 592 
 593   G1Allocator* allocator() {
 594     return _allocator;
 595   }
 596 
 597   G1MonitoringSupport* g1mm() {
 598     assert(_g1mm != NULL, "should have been initialized");
 599     return _g1mm;
 600   }
 601 
 602   // Expand the garbage-first heap by at least the given size (in bytes!).
 603   // Returns true if the heap was expanded by the requested amount;
 604   // false otherwise.
 605   // (Rounds up to a HeapRegion boundary.)
 606   bool expand(size_t expand_bytes);
 607 
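
As the comment notes, expand() rounds the request up to whole regions; the arithmetic looks roughly like this (align_size_up and HeapRegion::GrainBytes are existing names in this code base, the helper itself is illustrative):

  static uint regions_for_expansion(size_t expand_bytes) {
    size_t aligned_bytes = align_size_up(expand_bytes, HeapRegion::GrainBytes);
    return (uint)(aligned_bytes / HeapRegion::GrainBytes);  // regions to commit
  }
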
 608   // Returns the PLAB statistics for a given destination.
 609   inline G1EvacStats* alloc_buffer_stats(InCSetState dest);
 610 
 611   // Determines PLAB size for a given destination.
 612   inline size_t desired_plab_sz(InCSetState dest);
 613 
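
A plausible shape for the alloc_buffer_stats() inline above (the real body lives in g1CollectedHeap.inline.hpp; the switch shown here illustrates the young/old dispatch rather than copying it):

  inline G1EvacStats* G1CollectedHeap::alloc_buffer_stats(InCSetState dest) {
    switch (dest.value()) {
      case InCSetState::Young: return &_survivor_evac_stats;  // survivor PLABs
      case InCSetState::Old:   return &_old_evac_stats;       // tenured PLABs
      default:                 ShouldNotReachHere(); return NULL;
    }
  }
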
 614   inline AllocationContextStats& allocation_context_stats();
 615 
 616   // Do anything common to GC's.
 617   void gc_prologue(bool full);
 618   void gc_epilogue(bool full);
 619 
 620   // Modify the reclaim candidate set and test for presence.
 621   // These are only valid for starts_humongous regions.
 622   inline void set_humongous_reclaim_candidate(uint region, bool value);
 623   inline bool is_humongous_reclaim_candidate(uint region);
 624 
 625   // Remove from the reclaim candidate set.  Also remove from the
 626   // collection set so that later encounters avoid the slow path.
 627   inline void set_humongous_is_live(oop obj);
 628 
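
The comment above describes a two-step fast-path fix-up; a sketch of that intent (simplified, and assuming the _in_cset_fast_test table and addr_to_region() helper declared elsewhere in this class):

  inline void G1CollectedHeap::set_humongous_is_live(oop obj) {
    uint region = addr_to_region((HeapWord*)obj);
    if (is_humongous_reclaim_candidate(region)) {
      // Drop the region from the eager-reclaim candidates first, ...
      set_humongous_reclaim_candidate(region, false);
      // ... then take it out of the in-collection-set fast-test table so later
      // references to this object skip the slow path entirely.
      _in_cset_fast_test.clear_humongous(region);
    }
  }
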
 629   // Register the given region to be part of the collection set.


 772   // Heap_lock when we enter this method, we will pass the
 773   // gc_count_before (i.e., total_collections()) as a parameter since
 774   // it has to be read while holding the Heap_lock. Currently, both
 775   // methods that call do_collection_pause() release the Heap_lock
 776   // before the call, so it's easy to read gc_count_before just before.
 777   HeapWord* do_collection_pause(size_t         word_size,
 778                                 uint           gc_count_before,
 779                                 bool*          succeeded,
 780                                 GCCause::Cause gc_cause);
 781 
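
Caller-side sketch of the Heap_lock protocol spelled out in the comment above (do_collection_pause() is private, so the real callers are G1CollectedHeap methods; the free function here is purely illustrative):

  HeapWord* attempt_pause_sketch(G1CollectedHeap* g1h, size_t word_size) {
    uint gc_count_before;
    {
      MutexLocker ml(Heap_lock);
      // total_collections() must be sampled while Heap_lock is held ...
      gc_count_before = g1h->total_collections();
    } // ... and the lock is released before the pause is scheduled.
    bool succeeded;
    return g1h->do_collection_pause(word_size, gc_count_before, &succeeded,
                                    GCCause::_g1_inc_collection_pause);
  }
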
 782   void wait_for_root_region_scanning();
 783 
 784   // The guts of the incremental collection pause, executed by the vm
 785   // thread. It returns false if it is unable to do the collection due
 786   // to the GC locker being active, true otherwise
 787   bool do_collection_pause_at_safepoint(double target_pause_time_ms);
 788 
 789   // Actually do the work of evacuating the collection set.
 790   void evacuate_collection_set(EvacuationInfo& evacuation_info);
 791 
 792   // Update object copying statistics.
 793   void record_obj_copy_mem_stats();
 794   
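
record_obj_copy_mem_stats() is new in this change. Its body is not part of the header, so the following is only a guess at the intent suggested by the name and by the new per-destination G1EvacStats members; flush_stats_for() is a hypothetical helper, not a real method in this code base:

  void G1CollectedHeap::record_obj_copy_mem_stats() {
    // Fold this pause's object-copy statistics into the running per-destination
    // statistics so the next pause can size its PLABs accordingly.
    flush_stats_for(&_survivor_evac_stats);  // hypothetical helper
    flush_stats_for(&_old_evac_stats);       // hypothetical helper
  }
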
 795   // The g1 remembered set of the heap.
 796   G1RemSet* _g1_rem_set;
 797 
 798   // A set of cards that cover the objects for which the Rsets should be updated
 799   // concurrently after the collection.
 800   DirtyCardQueueSet _dirty_card_queue_set;
 801 
 802   // The closure used to refine a single card.
 803   RefineCardTableEntryClosure* _refine_cte_cl;
 804 
 805   // A DirtyCardQueueSet that is used to hold cards that contain
 806   // references into the current collection set. This is used to
 807   // update the remembered sets of the regions in the collection
 808   // set in the event of an evacuation failure.
 809   DirtyCardQueueSet _into_cset_dirty_card_queue_set;
 810 
 811   // After a collection pause, make the regions in the CS into free
 812   // regions.
 813   void free_collection_set(HeapRegion* cs_head, EvacuationInfo& evacuation_info);
 814 
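
The cs_head parameter above implies the collection set is walked as a linked list of regions; a simplified outline of that walk (the link and predicate names are from HeapRegion, but the loop body is heavily abbreviated):

  void free_collection_set_sketch(HeapRegion* cs_head) {
    HeapRegion* cur = cs_head;
    while (cur != NULL) {
      HeapRegion* next = cur->next_in_collection_set();
      if (!cur->evacuation_failed()) {
        // Fully evacuated regions can be returned to the free list.
      } else {
        // Regions with evacuation failures are retained as old regions.
      }
      cur = next;
    }
  }
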

