
src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp

rev 7902 : [mq]: 8073052-Rename-and-clean-up-the-allocation-manager-hierarchy-in-g1Allocator
rev 7903 : imported patch 8073013-add-detailed-information-about-plab-memory-usage
rev 7904 : imported patch 8040162-avoid-reallocating-plab-allocators
rev 7905 : imported patch 8067336-allow-that-plab-allocations-at-the-end-of-regions-are-flexible
rev 7908 : [mq]: 8073317-move-region-level-allocation-into-allocregionmanager

@@ -32,10 +32,11 @@
 #include "gc_implementation/g1/g1AllocRegion.hpp"
 #include "gc_implementation/g1/g1BiasedArray.hpp"
 #include "gc_implementation/g1/g1HRPrinter.hpp"
 #include "gc_implementation/g1/g1InCSetState.hpp"
 #include "gc_implementation/g1/g1MonitoringSupport.hpp"
+#include "gc_implementation/g1/g1EvacStats.hpp"
 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
 #include "gc_implementation/g1/g1YCTypes.hpp"
 #include "gc_implementation/g1/heapRegionManager.hpp"
 #include "gc_implementation/g1/heapRegionSet.hpp"
 #include "gc_implementation/shared/hSpaceCounters.hpp"

@@ -53,10 +54,11 @@
 // Forward declarations
 class HeapRegion;
 class HRRSCleanupTask;
 class GenerationSpec;
 class OopsInHeapRegionClosure;
+class G1ParScanThreadState;
 class G1KlassScanClosure;
 class ObjectClosure;
 class SpaceClosure;
 class CompactibleSpaceClosure;
 class Space;

@@ -179,19 +181,14 @@
   friend class VM_CollectForMetadataAllocation;
   friend class VM_G1CollectForAllocation;
   friend class VM_G1CollectFull;
   friend class VM_G1IncCollectionPause;
   friend class VMStructs;
-  friend class MutatorAllocRegion;
-  friend class SurvivorGCAllocRegion;
-  friend class OldGCAllocRegion;
-  friend class G1Allocator;
 
   // Closures used in implementation.
   friend class G1ParScanThreadState;
   friend class G1ParTask;
-  friend class G1ParGCAllocator;
   friend class G1PrepareCompactClosure;
 
   // Other related classes.
   friend class HeapRegionClaimer;
 

@@ -244,46 +241,29 @@
   G1RegionMappingChangedListener _listener;
 
   // The sequence of all heap regions in the heap.
   HeapRegionManager _hrm;
 
-  // Class that handles the different kinds of allocations.
+  // Manages all kinds of allocations within regions. This excludes only
+  // humongous object allocations.
   G1Allocator* _allocator;
 
+  // Outside of GC pauses, the number of bytes used in all regions other
+  // than the current allocation region(s).
+  size_t _summary_bytes_used;
+
   // Statistics for each allocation context
   AllocationContextStats _allocation_context_stats;
 
-  // PLAB sizing policy for survivors.
-  PLABStats _survivor_plab_stats;
-
-  // PLAB sizing policy for tenured objects.
-  PLABStats _old_plab_stats;
-
   // It specifies whether we should attempt to expand the heap after a
   // region allocation failure. If heap expansion fails we set this to
   // false so that we don't re-attempt the heap expansion (it's likely
   // that subsequent expansion attempts will also fail if one fails).
   // Currently, it is only consulted during GC and it's reset at the
   // start of each GC.
   bool _expand_heap_after_alloc_failure;
 
-  // It resets the mutator alloc region before new allocations can take place.
-  void init_mutator_alloc_region();
-
-  // It releases the mutator alloc region.
-  void release_mutator_alloc_region();
-
-  // It initializes the GC alloc regions at the start of a GC.
-  void init_gc_alloc_regions(EvacuationInfo& evacuation_info);
-
-  // It releases the GC alloc regions at the end of a GC.
-  void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info);
-
-  // It does any cleanup that needs to be done on the GC alloc regions
-  // before a Full GC.
-  void abandon_gc_alloc_regions();
-
   // Helper for monitoring and management support.
   G1MonitoringSupport* _g1mm;
 
   // Records whether the region at the given index is kept live by roots or
   // references from the young generation.

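The init/release/abandon helpers deleted above are not dropped; per rev 7908 (8073317, move region-level allocation into the alloc region manager) their responsibilities now sit behind the _allocator field. A minimal sketch of the resulting shape, with the G1Allocator method names assumed from the deleted declarations rather than read out of the patch itself:

    typedef unsigned int uint;
    class EvacuationInfo;  // opaque here

    // Assumed interface: the methods mirror what was removed from this class.
    class G1Allocator {
     public:
      virtual void init_mutator_alloc_region() = 0;
      virtual void release_mutator_alloc_region() = 0;
      virtual void init_gc_alloc_regions(EvacuationInfo& evacuation_info) = 0;
      virtual void release_gc_alloc_regions(uint no_of_gc_workers,
                                            EvacuationInfo& evacuation_info) = 0;
      virtual void abandon_gc_alloc_regions() = 0;
    };

    // An evacuation pause then brackets region setup through one object:
    void evacuation_pause_sketch(G1Allocator* allocator,
                                 EvacuationInfo& info, uint workers) {
      allocator->init_gc_alloc_regions(info);
      // ... evacuate the collection set ...
      allocator->release_gc_alloc_regions(workers, info);
    }
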
@@ -520,35 +500,17 @@
   // or not.
   HeapWord* attempt_allocation_at_safepoint(size_t word_size,
                                             AllocationContext_t context,
                                             bool expect_null_mutator_alloc_region);
 
+ public:
   // It dirties the cards that cover the block so that the post
   // write barrier never queues anything when updating objects on this
   // block. It is assumed (and in fact we assert) that the block
   // belongs to a young region.
   inline void dirty_young_block(HeapWord* start, size_t word_size);
 
-  // Allocate blocks during garbage collection. Will ensure an
-  // allocation region, either by picking one or expanding the
-  // heap, and then allocate a block of the given size. The block
-  // may not be a humongous - it must fit into a single heap region.
-  inline HeapWord* par_allocate_during_gc(InCSetState dest,
-                                          size_t word_size,
-                                          AllocationContext_t context);
-  // Ensure that no further allocations can happen in "r", bearing in mind
-  // that parallel threads might be attempting allocations.
-  void par_allocate_remaining_space(HeapRegion* r);
-
-  // Allocation attempt during GC for a survivor object / PLAB.
-  inline HeapWord* survivor_attempt_allocation(size_t word_size,
-                                               AllocationContext_t context);
-
-  // Allocation attempt during GC for an old object / PLAB.
-  inline HeapWord* old_attempt_allocation(size_t word_size,
-                                          AllocationContext_t context);
-
   // These methods are the "callbacks" from the G1AllocRegion class.
 
   // For mutator alloc regions.
   HeapRegion* new_mutator_alloc_region(size_t word_size, bool force);
   void retire_mutator_alloc_region(HeapRegion* alloc_region,

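Moving dirty_young_block() to public fits the same refactoring: once the allocator hands out blocks in young regions itself, it must be able to dirty the covering cards so the post write barrier never enqueues anything for them. A standalone model of the card arithmetic (card size and the dirty value are assumptions; HotSpot uses 512-byte cards and 0 as the dirty marker):

    #include <cstddef>
    #include <cstdint>

    const size_t  card_shift = 9;   // 512-byte cards
    const uint8_t dirty_card = 0;   // dirty marker value

    // Dirty every card covering [start, start + byte_size) so that stores
    // into this block are filtered out by the post write barrier.
    void dirty_block(uint8_t* byte_map, uintptr_t start, size_t byte_size) {
      uintptr_t first_card = start >> card_shift;
      uintptr_t last_card  = (start + byte_size - 1) >> card_shift;
      for (uintptr_t c = first_card; c <= last_card; c++) {
        byte_map[c] = dirty_card;
      }
    }
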
@@ -557,11 +519,11 @@
   // For GC alloc regions.
   HeapRegion* new_gc_alloc_region(size_t word_size, uint count,
                                   InCSetState dest);
   void retire_gc_alloc_region(HeapRegion* alloc_region,
                               size_t allocated_bytes, InCSetState dest);
-
+ private:
   // - if explicit_gc is true, the GC is for a System.gc() or a heap
   //   inspection request and should collect the entire heap
   // - if clear_all_soft_refs is true, all soft references should be
   //   cleared during the GC
   // - if explicit_gc is false, word_size describes the allocation that

@@ -594,11 +556,11 @@
   // allocated block, or else "NULL".
   HeapWord* expand_and_allocate(size_t word_size, AllocationContext_t context);
 
   // Process any reference objects discovered during
   // an incremental evacuation pause.
-  void process_discovered_references(uint no_of_gc_workers);
+  void process_discovered_references(G1ParScanThreadState** pss, uint no_of_gc_workers);
 
   // Enqueue any remaining discovered references
   // after processing.
   void enqueue_discovered_references(uint no_of_gc_workers);
 

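Threading G1ParScanThreadState** through process_discovered_references() lines up with rev 7904 (avoid reallocating PLAB allocators): the per-worker states are created once per pause and reused across phases instead of being rebuilt inside reference processing. A sketch of the assumed caller shape:

    typedef unsigned int uint;
    class G1ParScanThreadState;  // opaque per-worker evacuation state

    // The declaration as changed above; the definition is in the .cpp file.
    void process_discovered_references(G1ParScanThreadState** pss,
                                       uint no_of_gc_workers);

    void pause_sketch(G1ParScanThreadState** per_worker_states, uint workers) {
      // ... evacuation already populated per_worker_states[0..workers-1] ...
      process_discovered_references(per_worker_states, workers);
      // the same states remain live for the remaining pause phases
    }
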
@@ -617,16 +579,10 @@
   // Returns true if the heap was expanded by the requested amount;
   // false otherwise.
   // (Rounds up to a HeapRegion boundary.)
   bool expand(size_t expand_bytes);
 
-  // Returns the PLAB statistics for a given destination.
-  inline PLABStats* alloc_buffer_stats(InCSetState dest);
-
-  // Determines PLAB size for a given destination.
-  inline size_t desired_plab_sz(InCSetState dest);
-
   inline AllocationContextStats& allocation_context_stats();
 
   // Do anything common to GC's.
   virtual void gc_prologue(bool full);
   virtual void gc_epilogue(bool full);

@@ -696,10 +652,13 @@
 
   G1YCType yc_type();
 
   G1HRPrinter* hr_printer() { return &_hr_printer; }
 
+  // Allocates a new heap region instance.
+  virtual HeapRegion* new_heap_region(uint hrs_index, MemRegion mr);
+
   // Frees a non-humongous region by initializing its contents and
   // adding it to the free list that's passed as a parameter (this is
   // usually a local list which will be appended to the master free
   // list later). The used bytes of freed regions are accumulated in
   // pre_used. If par is true, the region's RSet will not be freed

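Making new_heap_region() virtual concentrates region creation in one overridable factory, so a G1CollectedHeap subclass can return a specialized HeapRegion subtype. A standalone model with stand-in types (every name below is hypothetical):

    typedef unsigned int uint;
    struct MemRegion {};  // stand-in for the real value type

    struct HeapRegion {
      HeapRegion(uint /*hrs_index*/, MemRegion /*mr*/) {}
      virtual ~HeapRegion() {}
    };

    struct BaseHeap {
      virtual HeapRegion* new_heap_region(uint hrs_index, MemRegion mr) {
        return new HeapRegion(hrs_index, mr);
      }
      virtual ~BaseHeap() {}
    };

    // A specialized heap substitutes its own region type without touching
    // any of the call sites that ask the heap for a new region.
    struct TaggedRegion : HeapRegion {
      TaggedRegion(uint i, MemRegion m) : HeapRegion(i, m) {}
    };

    struct SpecializedHeap : BaseHeap {
      virtual HeapRegion* new_heap_region(uint hrs_index, MemRegion mr) {
        return new TaggedRegion(hrs_index, mr);
      }
    };
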
@@ -755,10 +714,25 @@
   bool do_collection_pause_at_safepoint(double target_pause_time_ms);
 
   // Actually do the work of evacuating the collection set.
   void evacuate_collection_set(EvacuationInfo& evacuation_info);
 
+  // Print the header for the per-thread termination statistics.
+  static void print_termination_stats_hdr(outputStream* const st);
+  // Print actual per-thread termination statistics.
+  void print_termination_stats(outputStream* const st,
+                               uint worker_id,
+                               double elapsed_ms,
+                               double strong_roots_ms,
+                               double term_ms,
+                               size_t term_attempts,
+                               size_t alloc_buffer_waste,
+                               size_t undo_waste) const;
+  // Update object copying statistics.
+  void record_obj_copy_mem_stats();
+  void record_obj_copy_mem_stats(InCSetState which);
+
   // The g1 remembered set of the heap.
   G1RemSet* _g1_rem_set;
 
   // A set of cards that cover the objects for which the Rsets should be updated
   // concurrently after the collection.

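The hdr/row split follows the usual pattern for per-worker GC logging, presumably hoisted out of the worker task: one header line, then one row per worker at the end of the pause. A self-contained analogue of that output shape (column names and widths are assumptions, not the patch's actual format):

    #include <cstdio>
    #include <cstddef>

    static void print_stats_hdr() {
      printf("%3s %10s %10s %10s %10s %12s %10s\n",
             "thr", "elapsed", "roots", "term", "attempts", "alloc-waste", "undo");
    }

    static void print_stats_row(unsigned id, double elapsed_ms, double roots_ms,
                                double term_ms, size_t attempts,
                                size_t alloc_waste, size_t undo_waste) {
      printf("%3u %10.2f %10.2f %10.2f %10zu %12zu %10zu\n",
             id, elapsed_ms, roots_ms, term_ms, attempts, alloc_waste, undo_waste);
    }

    int main() {
      print_stats_hdr();
      for (unsigned i = 0; i < 4; i++) {  // one row per GC worker
        print_stats_row(i, 12.3, 8.1, 1.2, 17, 4096, 128);
      }
      return 0;
    }
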
@@ -1098,10 +1072,20 @@
   // This should be called when we're not holding the heap lock. The
   // result might be a bit inaccurate.
   size_t used_unlocked() const;
   size_t recalculate_used() const;
 
+  void increase_used(size_t bytes) { _summary_bytes_used += bytes; }
+  void set_used(size_t bytes) { _summary_bytes_used = bytes; }
+
+  void decrease_used(size_t bytes) {
+    assert(_summary_bytes_used >= bytes,
+           err_msg("invariant: _summary_bytes_used: "SIZE_FORMAT" should be >= bytes: "SIZE_FORMAT,
+               _summary_bytes_used, bytes));
+    _summary_bytes_used -= bytes;
+  }
+
   // These virtual functions do the actual allocation.
   // Some heaps may offer a contiguous region for shared non-blocking
   // allocation, via inlined code (by exporting the address of the top and
   // end fields defining the extent of the contiguous allocation region.)
   // But G1CollectedHeap doesn't yet support this.

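These three helpers make the accounting discipline for _summary_bytes_used explicit: additions when a region is retired, a wholesale set_used() after recalculate_used(), and a decrease that asserts it can never underflow. A standalone model of that invariant:

    #include <cassert>
    #include <cstddef>

    // Only three entry points may touch the counter; decrease_used()
    // enforces the invariant from the err_msg above.
    struct UsedBytesModel {
      size_t _summary_bytes_used;
      UsedBytesModel() : _summary_bytes_used(0) {}

      void increase_used(size_t bytes) { _summary_bytes_used += bytes; }
      void set_used(size_t bytes)      { _summary_bytes_used = bytes; }
      void decrease_used(size_t bytes) {
        assert(_summary_bytes_used >= bytes);  // never remove more than recorded
        _summary_bytes_used -= bytes;
      }
    };
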
@@ -1432,10 +1416,12 @@
     // humongous and that we don't allocate a humongous
     // object in a TLAB.
     return word_size > _humongous_object_threshold_in_words;
   }
 
+  static size_t humongous_object_threshold_in_words() { return _humongous_object_threshold_in_words; }
+
   // Update mod union table with the set of dirty cards.
   void updateModUnion();
 
   // Set the mod union bits corresponding to the given memRegion.  Note
   // that this is always a safe operation, since it doesn't clear any
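The new static accessor exposes the same cutoff that is_humongous() tests, so code outside the heap (for instance the allocator deciding between a PLAB refill and a direct allocation) can apply the identical check. In G1 the threshold is half a region; a standalone model (the region size is an assumption):

    #include <cstddef>

    const size_t region_size_bytes = 1024 * 1024;  // assumed 1M regions
    const size_t region_size_words = region_size_bytes / sizeof(void*);
    const size_t humongous_threshold_in_words = region_size_words / 2;

    // Mirrors the inline test above: strictly larger than half a region.
    bool is_humongous(size_t word_size) {
      return word_size > humongous_threshold_in_words;
    }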