
src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp

rev 7902 : [mq]: 8073052-Rename-and-clean-up-the-allocation-manager-hierarchy-in-g1Allocator

--- old/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp

 167 };
 168 
 169 class RefineCardTableEntryClosure;
 170 
 171 class G1RegionMappingChangedListener : public G1MappingChangedListener {
 172  private:
 173   void reset_from_card_cache(uint start_idx, size_t num_regions);
 174  public:
 175   virtual void on_commit(uint start_idx, size_t num_regions, bool zero_filled);
 176 };
 177 
 178 class G1CollectedHeap : public SharedHeap {
 179   friend class VM_CollectForMetadataAllocation;
 180   friend class VM_G1CollectForAllocation;
 181   friend class VM_G1CollectFull;
 182   friend class VM_G1IncCollectionPause;
 183   friend class VMStructs;
 184   friend class MutatorAllocRegion;
 185   friend class SurvivorGCAllocRegion;
 186   friend class OldGCAllocRegion;
 187   friend class G1Allocator;
 188 
 189   // Closures used in implementation.
 190   friend class G1ParScanThreadState;
 191   friend class G1ParTask;
 192   friend class G1ParGCAllocator;
 193   friend class G1PrepareCompactClosure;
 194 
 195   // Other related classes.
 196   friend class HeapRegionClaimer;
 197 
 198   // Testing classes.
 199   friend class G1CheckCSetFastTableClosure;
 200 
 201 private:
 202   // The one and only G1CollectedHeap, so static functions can find it.
 203   static G1CollectedHeap* _g1h;
 204 
 205   static size_t _humongous_object_threshold_in_words;
 206 
 207   // The secondary free list which contains regions that have been
 208   // freed up during the cleanup process. This will be appended to
 209   // the master free list when appropriate.
 210   FreeRegionList _secondary_free_list;
 211 
 212   // It keeps track of the old regions.


 229   // only exception is the humongous set which we leave unaltered. If
 230   // free_list_only is true, it will only tear down the master free
 231   // list. It is called before a Full GC (free_list_only == false) or
 232   // before heap shrinking (free_list_only == true).
 233   void tear_down_region_sets(bool free_list_only);
 234 
 235   // Rebuilds the region sets / lists so that they are repopulated to
 236   // reflect the contents of the heap. The only exception is the
 237   // humongous set which was not torn down in the first place. If
 238   // free_list_only is true, it will only rebuild the master free
 239   // list. It is called after a Full GC (free_list_only == false) or
 240   // after heap shrinking (free_list_only == true).
 241   void rebuild_region_sets(bool free_list_only);
 242 
 243   // Callback for region mapping changed events.
 244   G1RegionMappingChangedListener _listener;
 245 
 246   // The sequence of all heap regions in the heap.
 247   HeapRegionManager _hrm;
 248 
 249   // Class that handles the different kinds of allocations.
 250   G1Allocator* _allocator;
 251 
 252   // Statistics for each allocation context.
 253   AllocationContextStats _allocation_context_stats;
 254 
 255   // PLAB sizing policy for survivors.
 256   PLABStats _survivor_plab_stats;
 257 
 258   // PLAB sizing policy for tenured objects.
 259   PLABStats _old_plab_stats;
 260 
 261   // It specifies whether we should attempt to expand the heap after a
 262   // region allocation failure. If heap expansion fails we set this to
 263   // false so that we don't re-attempt the heap expansion (if one
 264   // attempt fails, subsequent attempts are likely to fail too).
 265   // Currently, it is only consulted during GC and it's reset at the
 266   // start of each GC.
 267   bool _expand_heap_after_alloc_failure;
 268 
 269   // It resets the mutator alloc region before new allocations can take place.
 270   void init_mutator_alloc_region();


 587   HeapWord* satisfy_failed_allocation(size_t word_size,
 588                                       AllocationContext_t context,
 589                                       bool* succeeded);
 590 
 591   // Attempts to expand the heap sufficiently to support an
 592   // allocation of the given "word_size". If successful, performs
 593   // the allocation and returns the address of the allocated
 594   // block; otherwise returns "NULL".
 595   HeapWord* expand_and_allocate(size_t word_size, AllocationContext_t context);
 596 
 597   // Process any reference objects discovered during
 598   // an incremental evacuation pause.
 599   void process_discovered_references(uint no_of_gc_workers);
 600 
 601   // Enqueue any remaining discovered references
 602   // after processing.
 603   void enqueue_discovered_references(uint no_of_gc_workers);
 604 
 605 public:
 606 
 607   G1Allocator* allocator() {
 608     return _allocator;
 609   }
 610 
 611   G1MonitoringSupport* g1mm() {
 612     assert(_g1mm != NULL, "should have been initialized");
 613     return _g1mm;
 614   }
 615 
 616   // Expand the garbage-first heap by at least the given size (in bytes!).
 617   // Returns true if the heap was expanded by the requested amount;
 618   // false otherwise.
 619   // (Rounds up to a HeapRegion boundary.)
 620   bool expand(size_t expand_bytes);
 621 
 622   // Returns the PLAB statistics for a given destination.
 623   inline PLABStats* alloc_buffer_stats(InCSetState dest);
 624 
 625   // Determines PLAB size for a given destination.
 626   inline size_t desired_plab_sz(InCSetState dest);
 627 

+++ new/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp

 167 };
 168 
 169 class RefineCardTableEntryClosure;
 170 
 171 class G1RegionMappingChangedListener : public G1MappingChangedListener {
 172  private:
 173   void reset_from_card_cache(uint start_idx, size_t num_regions);
 174  public:
 175   virtual void on_commit(uint start_idx, size_t num_regions, bool zero_filled);
 176 };
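
A hedged sketch of what the listener's on_commit() plausibly does, based
only on the two declarations above (the real body lives in
g1CollectedHeap.cpp and may differ):

  void G1RegionMappingChangedListener::on_commit(uint start_idx,
                                                 size_t num_regions,
                                                 bool zero_filled) {
    // The from-card cache may hold stale entries for re-committed regions,
    // so clear it regardless of whether the memory was zero-filled.
    reset_from_card_cache(start_idx, num_regions);
  }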
 177 
 178 class G1CollectedHeap : public SharedHeap {
 179   friend class VM_CollectForMetadataAllocation;
 180   friend class VM_G1CollectForAllocation;
 181   friend class VM_G1CollectFull;
 182   friend class VM_G1IncCollectionPause;
 183   friend class VMStructs;
 184   friend class MutatorAllocRegion;
 185   friend class SurvivorGCAllocRegion;
 186   friend class OldGCAllocRegion;
 187   friend class AllocRegionManager;
 188 
 189   // Closures used in implementation.
 190   friend class G1ParScanThreadState;
 191   friend class G1ParTask;
 192   friend class PLABAllocator;
 193   friend class G1PrepareCompactClosure;
 194 
 195   // Other related classes.
 196   friend class HeapRegionClaimer;
 197 
 198   // Testing classes.
 199   friend class G1CheckCSetFastTableClosure;
 200 
 201 private:
 202   // The one and only G1CollectedHeap, so static functions can find it.
 203   static G1CollectedHeap* _g1h;
 204 
 205   static size_t _humongous_object_threshold_in_words;
 206 
 207   // The secondary free list which contains regions that have been
 208   // freed up during the cleanup process. This will be appended to
 209   // the master free list when appropriate.
 210   FreeRegionList _secondary_free_list;
 211 
 212   // It keeps track of the old regions.


 229   // only exception is the humongous set which we leave unaltered. If
 230   // free_list_only is true, it will only tear down the master free
 231   // list. It is called before a Full GC (free_list_only == false) or
 232   // before heap shrinking (free_list_only == true).
 233   void tear_down_region_sets(bool free_list_only);
 234 
 235   // Rebuilds the region sets / lists so that they are repopulated to
 236   // reflect the contents of the heap. The only exception is the
 237   // humongous set which was not torn down in the first place. If
 238   // free_list_only is true, it will only rebuild the master free
 239   // list. It is called after a Full GC (free_list_only == false) or
 240   // after heap shrinking (free_list_only == true).
 241   void rebuild_region_sets(bool free_list_only);
 242 
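A minimal sketch of how these two calls pair up, derived only from the
comments above (the bracketing shown is illustrative, not the real
HotSpot call sites):

  // Around a Full GC: tear down everything except the humongous set,
  // then rebuild from the compacted heap.
  tear_down_region_sets(false /* free_list_only */);
  // ... full collection compacts the heap ...
  rebuild_region_sets(false /* free_list_only */);

  // Around heap shrinking: only the master free list is affected.
  tear_down_region_sets(true /* free_list_only */);
  // ... uncommit regions at the end of the heap ...
  rebuild_region_sets(true /* free_list_only */);
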
 243   // Callback for region mapping changed events.
 244   G1RegionMappingChangedListener _listener;
 245 
 246   // The sequence of all heap regions in the heap.
 247   HeapRegionManager _hrm;
 248 
 249   // Handles the different kinds of allocations within a region.
 250   AllocRegionManager* _allocator;
 251 
 252   // Statistics for each allocation context.
 253   AllocationContextStats _allocation_context_stats;
 254 
 255   // PLAB sizing policy for survivors.
 256   PLABStats _survivor_plab_stats;
 257 
 258   // PLAB sizing policy for tenured objects.
 259   PLABStats _old_plab_stats;
 260 
 261   // It specifies whether we should attempt to expand the heap after a
 262   // region allocation failure. If heap expansion fails we set this to
 263   // false so that we don't re-attempt the heap expansion (if one
 264   // attempt fails, subsequent attempts are likely to fail too).
 265   // Currently, it is only consulted during GC and it's reset at the
 266   // start of each GC.
 267   bool _expand_heap_after_alloc_failure;
 268 
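A hedged sketch of the consult-and-clear pattern the comment describes
(the enclosing allocation-failure path is hypothetical; only the flag and
expand_and_allocate() are from this header):

  HeapWord* result = NULL;
  if (_expand_heap_after_alloc_failure) {
    result = expand_and_allocate(word_size, context);
    if (result == NULL) {
      // Expansion failed once; don't retry for the rest of this GC.
      _expand_heap_after_alloc_failure = false;
    }
  }
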
 269   // It resets the mutator alloc region before new allocations can take place.
 270   void init_mutator_alloc_region();


 587   HeapWord* satisfy_failed_allocation(size_t word_size,
 588                                       AllocationContext_t context,
 589                                       bool* succeeded);
 590 
 591   // Attempts to expand the heap sufficiently to support an
 592   // allocation of the given "word_size". If successful, performs
 593   // the allocation and returns the address of the allocated
 594   // block; otherwise returns "NULL".
 595   HeapWord* expand_and_allocate(size_t word_size, AllocationContext_t context);
 596 
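An illustrative body for the contract stated above, assuming a
hypothetical retry helper (the real implementation is in
g1CollectedHeap.cpp):

  size_t expand_bytes = word_size * HeapWordSize;
  if (!expand(expand_bytes)) {
    return NULL;  // could not grow the heap, so the allocation cannot succeed
  }
  // Retry the allocation now that at least expand_bytes are committed.
  return attempt_allocation_after_expansion(word_size, context);  // hypothetical
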
 597   // Process any reference objects discovered during
 598   // an incremental evacuation pause.
 599   void process_discovered_references(uint no_of_gc_workers);
 600 
 601   // Enqueue any remaining discovered references
 602   // after processing.
 603   void enqueue_discovered_references(uint no_of_gc_workers);
 604 
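The two phases are intended to run back to back at the end of an
evacuation pause; a hedged sketch of the ordering (the surrounding pause
logic is elided):

  // Resolve references discovered during the pause against the
  // post-evacuation heap, then hand survivors to their ReferenceQueues.
  process_discovered_references(n_workers);
  enqueue_discovered_references(n_workers);
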
 605 public:
 606 
 607   AllocRegionManager* allocator() {
 608     return _allocator;
 609   }
 610 
 611   G1MonitoringSupport* g1mm() {
 612     assert(_g1mm != NULL, "should have been initialized");
 613     return _g1mm;
 614   }
 615 
 616   // Expand the garbage-first heap by at least the given size (in bytes!).
 617   // Returns true if the heap was expanded by the requested amount;
 618   // false otherwise.
 619   // (Rounds up to a HeapRegion boundary.)
 620   bool expand(size_t expand_bytes);
 621 
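Because the request is rounded up to a region boundary, callers can
over-commit; an illustrative example (HeapRegion::GrainBytes is the
region size):

  // Requesting a single byte still commits at least one whole region:
  // the effective growth is align_size_up(1, HeapRegion::GrainBytes).
  bool expanded = expand(1);
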
 622   // Returns the PLAB statistics for a given destination.
 623   inline PLABStats* alloc_buffer_stats(InCSetState dest);
 624 
 625   // Determines PLAB size for a given destination.
 626   inline size_t desired_plab_sz(InCSetState dest);
 627 
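
A plausible shape for these two inlines, given the PLABStats fields and
the humongous threshold declared above (hedged; the real definitions are
in g1CollectedHeap.inline.hpp and may differ):

  inline PLABStats* G1CollectedHeap::alloc_buffer_stats(InCSetState dest) {
    switch (dest.value()) {
      case InCSetState::Young: return &_survivor_plab_stats;  // survivor PLABs
      case InCSetState::Old:   return &_old_plab_stats;       // tenured PLABs
      default: ShouldNotReachHere(); return NULL;
    }
  }

  inline size_t G1CollectedHeap::desired_plab_sz(InCSetState dest) {
    // Take the size suggested by the per-destination statistics, but keep
    // PLABs below the humongous threshold so a buffer never becomes a
    // humongous object.
    size_t sz = alloc_buffer_stats(dest)->desired_plab_sz();
    return MIN2(sz, _humongous_object_threshold_in_words);
  }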

