
src/hotspot/share/gc/g1/g1CollectedHeap.hpp

rev 48959 : 8198420: Remove unused extension point AllocationContextStats
Reviewed-by:


--- old/src/hotspot/share/gc/g1/g1CollectedHeap.hpp

 205   HeapRegionManager _hrm;
 206 
 207   // Manages all allocations within regions except humongous object allocations.
 208   G1Allocator* _allocator;
 209 
 210   // Manages all heap verification.
 211   G1HeapVerifier* _verifier;
 212 
 213   // Outside of GC pauses, the number of bytes used in all regions other
 214   // than the current allocation region(s).
 215   size_t _summary_bytes_used;
 216 
 217   void increase_used(size_t bytes);
 218   void decrease_used(size_t bytes);
 219 
 220   void set_used(size_t bytes);
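The increase_used / decrease_used / set_used trio above maintains _summary_bytes_used outside of GC pauses. As a minimal standalone sketch of that bookkeeping (an illustrative class with an assumed underflow check, not the JDK implementation):

    #include <cassert>
    #include <cstddef>

    // Tracks bytes used in all regions other than the current allocation
    // region(s), mirroring the _summary_bytes_used updaters above.
    class UsedBytesTracker {
      size_t _summary_bytes_used;
     public:
      UsedBytesTracker() : _summary_bytes_used(0) {}
      void increase_used(size_t bytes) { _summary_bytes_used += bytes; }
      void decrease_used(size_t bytes) {
        assert(_summary_bytes_used >= bytes && "used-bytes underflow");
        _summary_bytes_used -= bytes;
      }
      void set_used(size_t bytes) { _summary_bytes_used = bytes; }
      size_t used() const { return _summary_bytes_used; }
    };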
 221 
 222   // Class that handles archive allocation ranges.
 223   G1ArchiveAllocator* _archive_allocator;
 224 
 225   // Statistics for each allocation context
 226   AllocationContextStats _allocation_context_stats;
 227 
 228   // GC allocation statistics policy for survivors.
 229   G1EvacStats _survivor_evac_stats;
 230 
 231   // GC allocation statistics policy for tenured objects.
 232   G1EvacStats _old_evac_stats;
 233 
 234   // It specifies whether we should attempt to expand the heap after a
 235   // region allocation failure. If heap expansion fails we set this to
 236   // false so that we don't re-attempt the heap expansion (it's likely
 237   // that subsequent expansion attempts will also fail if one fails).
 238   // Currently, it is only consulted during GC and it's reset at the
 239   // start of each GC.
 240   bool _expand_heap_after_alloc_failure;
 241 
 242   // Helper for monitoring and management support.
 243   G1MonitoringSupport* _g1mm;
 244 
 245   // Records whether the region at the given index is (still) a
 246   // candidate for eager reclaim.  Only valid for humongous start
 247   // regions; other regions have unspecified values.  Humongous start


 260       return get_by_index(region);
 261     }
 262   };
 263 
 264   HumongousReclaimCandidates _humongous_reclaim_candidates;
 265   // Stores whether during humongous object registration we found candidate regions.
 266   // If not, we can skip a few steps.
 267   bool _has_humongous_reclaim_candidates;
 268 
 269   volatile uint _gc_time_stamp;
 270 
 271   G1HRPrinter _hr_printer;
 272 
 273   // It decides whether an explicit GC should start a concurrent cycle
 274   // instead of doing a STW GC. Currently, a concurrent cycle is
 275   // explicitly started if:
 276   // (a) cause == _gc_locker and +GCLockerInvokesConcurrent, or
 277   // (b) cause == _g1_humongous_allocation, or
 278   // (c) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent, or
 279   // (d) cause == _dcmd_gc_run and +ExplicitGCInvokesConcurrent, or
 280   // (e) cause == _update_allocation_context_stats_inc, or
 281   // (f) cause == _wb_conc_mark.
 282   bool should_do_concurrent_full_gc(GCCause::Cause cause);
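Cases (a)-(f) above spell out when should_do_concurrent_full_gc() answers true. A self-contained sketch of that decision table, with the GCCause values and the two product flags redeclared locally for illustration (the real declarations live in HotSpot's gcCause.hpp and flag machinery; this is not the verbatim JDK body, and note that the patch under review removes case (e)):

    // Local stand-ins for GCCause::Cause values and the two -XX flags.
    enum class Cause {
      _gc_locker,
      _g1_humongous_allocation,
      _java_lang_system_gc,
      _dcmd_gc_run,
      _update_allocation_context_stats_inc,
      _wb_conc_mark,
      _other
    };
    static bool GCLockerInvokesConcurrent   = false; // -XX:+GCLockerInvokesConcurrent
    static bool ExplicitGCInvokesConcurrent = false; // -XX:+ExplicitGCInvokesConcurrent

    // True when an explicit GC should start a concurrent cycle instead of
    // a STW collection, per cases (a)-(f).
    bool should_do_concurrent_full_gc(Cause cause) {
      switch (cause) {
        case Cause::_gc_locker:                           return GCLockerInvokesConcurrent;   // (a)
        case Cause::_g1_humongous_allocation:             return true;                        // (b)
        case Cause::_java_lang_system_gc:                 return ExplicitGCInvokesConcurrent; // (c)
        case Cause::_dcmd_gc_run:                         return ExplicitGCInvokesConcurrent; // (d)
        case Cause::_update_allocation_context_stats_inc: return true;                        // (e)
        case Cause::_wb_conc_mark:                        return true;                        // (f)
        default:                                          return false;
      }
    }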
 283 
 284   // Indicates whether we are in young or mixed GC mode.
 285   G1CollectorState _collector_state;
 286 
 287   // Keeps track of how many "old marking cycles" (i.e., Full GCs or
 288   // concurrent cycles) we have started.
 289   volatile uint _old_marking_cycles_started;
 290 
 291   // Keeps track of how many "old marking cycles" (i.e., Full GCs or
 292   // concurrent cycles) we have completed.
 293   volatile uint _old_marking_cycles_completed;
 294 
 295   // This is a non-product method that is helpful for testing. It is
 296   // called at the end of a GC and artificially expands the heap by
 297   // allocating a number of dead regions. This way we can induce very
 298   // frequent marking cycles and stress the cleanup / concurrent
 299   // cleanup code more (as all the regions that will be allocated by
 300   // this method will be found dead by the marking cycle).
 301   void allocate_dummy_regions() PRODUCT_RETURN;


 563     return _verifier;
 564   }
 565 
 566   G1MonitoringSupport* g1mm() {
 567     assert(_g1mm != NULL, "should have been initialized");
 568     return _g1mm;
 569   }
 570 
 571   // Expand the garbage-first heap by at least the given size (in bytes!).
 572   // Returns true if the heap was expanded by the requested amount;
 573   // false otherwise.
 574   // (Rounds up to a HeapRegion boundary.)
 575   bool expand(size_t expand_bytes, WorkGang* pretouch_workers = NULL, double* expand_time_ms = NULL);
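The expand() comment above notes that a request is rounded up to a HeapRegion boundary before the heap grows. A small illustrative helper for that rounding (the standalone form and name are assumptions, not JDK code):

    #include <cstddef>

    // Rounds expand_bytes up to a whole number of regions of grain_bytes each.
    static size_t round_up_to_region_boundary(size_t expand_bytes, size_t grain_bytes) {
      return ((expand_bytes + grain_bytes - 1) / grain_bytes) * grain_bytes;
    }

With 1 MiB regions, for example, a 1-byte request still expands the heap by one full region.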
 576 
 577   // Returns the PLAB statistics for a given destination.
 578   inline G1EvacStats* alloc_buffer_stats(InCSetState dest);
 579 
 580   // Determines PLAB size for a given destination.
 581   inline size_t desired_plab_sz(InCSetState dest);
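alloc_buffer_stats() above hands back per-destination PLAB statistics; given the _survivor_evac_stats and _old_evac_stats fields declared earlier, it plausibly dispatches on the destination state. A standalone sketch (Dest and the stub type stand in for InCSetState and G1EvacStats; not the verbatim JDK body):

    // Stand-ins for InCSetState's destination kinds and for G1EvacStats.
    enum class Dest { Young, Old };
    struct EvacStatsStub {};

    static EvacStatsStub survivor_evac_stats; // survivor PLAB statistics
    static EvacStatsStub old_evac_stats;      // tenured PLAB statistics

    // Selects the PLAB statistics for a given evacuation destination.
    EvacStatsStub* alloc_buffer_stats(Dest dest) {
      return (dest == Dest::Young) ? &survivor_evac_stats : &old_evac_stats;
    }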
 582 
 583   inline AllocationContextStats& allocation_context_stats();
 584 
 585   // Do anything common to GCs.
 586   void gc_prologue(bool full);
 587   void gc_epilogue(bool full);
 588 
 589   // Modify the reclaim candidate set and test for presence.
 590   // These are only valid for starts_humongous regions.
 591   inline void set_humongous_reclaim_candidate(uint region, bool value);
 592   inline bool is_humongous_reclaim_candidate(uint region);
 593 
 594   // Remove from the reclaim candidate set.  Also remove from the
 595   // collection set so that later encounters avoid the slow path.
 596   inline void set_humongous_is_live(oop obj);
 597 
 598   // Register the given region to be part of the collection set.
 599   inline void register_humongous_region_with_cset(uint index);
 600   // Register regions with humongous objects (actually on the start region) in
 601   // the in_cset_fast_test table.
 602   void register_humongous_regions_with_cset();
 603   // We register a region with the fast "in collection set" test. We
 604   // simply set to true the array slot corresponding to this region.
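The comments above describe two per-region flag tables: the eager-reclaim candidate set (valid only for humongous start regions) and the fast "in collection set" test, whose array slot is simply set to true on registration. A simplified, self-contained sketch of both (vector-backed with illustrative names; G1's own tables are built on biased mapped arrays):

    #include <cstdint>
    #include <vector>

    class RegionFlags {
      std::vector<bool> _in_cset;    // fast "in collection set" test, one slot per region
      std::vector<bool> _candidate;  // eager-reclaim candidates (humongous starts only)
     public:
      explicit RegionFlags(uint32_t max_regions)
        : _in_cset(max_regions, false), _candidate(max_regions, false) {}

      void register_humongous_region_with_cset(uint32_t r) { _in_cset[r] = true; }
      void set_humongous_reclaim_candidate(uint32_t r, bool v) { _candidate[r] = v; }
      bool is_humongous_reclaim_candidate(uint32_t r) const { return _candidate[r]; }

      // Once the humongous object in region r is found live, drop the region
      // from both tables so later encounters avoid the slow path.
      void set_humongous_is_live(uint32_t r) {
        if (_candidate[r]) {
          _candidate[r] = false;
          _in_cset[r]   = false;
        }
      }
    };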


1112   inline void old_set_add(HeapRegion* hr);
1113   inline void old_set_remove(HeapRegion* hr);
1114 
1115   size_t non_young_capacity_bytes() {
1116     return (_old_set.length() + _humongous_set.length()) * HeapRegion::GrainBytes;
1117   }
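As a worked example of non_young_capacity_bytes() above, assume (illustratively; the real value depends on heap size and platform) that HeapRegion::GrainBytes is 1 MiB:

    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t GrainBytes        = 1024 * 1024; // assumed 1 MiB regions
      const size_t old_regions       = 300;
      const size_t humongous_regions = 12;
      // (300 + 12) regions * 1 MiB = 312 MiB of non-young capacity.
      size_t non_young = (old_regions + humongous_regions) * GrainBytes;
      printf("%zu bytes\n", non_young); // prints 327155712
      return 0;
    }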
1118 
1119   void set_free_regions_coming();
1120   void reset_free_regions_coming();
1121   bool free_regions_coming() { return _free_regions_coming; }
1122   void wait_while_free_regions_coming();
1123 
1124   // Determine whether the given region is one that we are using as an
1125   // old GC alloc region.
1126   bool is_old_gc_alloc_region(HeapRegion* hr);
1127 
1128   // Perform a collection of the heap; intended for use in implementing
1129   // "System.gc".  This probably implies as full a collection as the
1130   // "CollectedHeap" supports.
1131   virtual void collect(GCCause::Cause cause);
1132 
1133   virtual bool copy_allocation_context_stats(const jint* contexts,
1134                                              jlong* totals,
1135                                              jbyte* accuracy,
1136                                              jint len);
1137 
1138   // True iff an evacuation has failed in the most-recent collection.
1139   bool evacuation_failed() { return _evacuation_failed; }
1140 
1141   void remove_from_old_sets(const uint old_regions_removed, const uint humongous_regions_removed);
1142   void prepend_to_freelist(FreeRegionList* list);
1143   void decrement_summary_bytes(size_t bytes);
1144 
1145   virtual bool is_in(const void* p) const;
1146 #ifdef ASSERT
1147   // Returns whether p is in one of the available areas of the heap. Slow but
1148   // extensive version.
1149   bool is_in_exact(const void* p) const;
1150 #endif
1151 
1152   // Return "TRUE" iff the given object address is within the collection
1153   // set. Assumes that the reference points into the heap.
1154   inline bool is_in_cset(const HeapRegion *hr);
1155   inline bool is_in_cset(oop obj);
1156   inline bool is_in_cset(HeapWord* addr);




+++ new/src/hotspot/share/gc/g1/g1CollectedHeap.hpp

 205   HeapRegionManager _hrm;
 206 
 207   // Manages all allocations within regions except humongous object allocations.
 208   G1Allocator* _allocator;
 209 
 210   // Manages all heap verification.
 211   G1HeapVerifier* _verifier;
 212 
 213   // Outside of GC pauses, the number of bytes used in all regions other
 214   // than the current allocation region(s).
 215   size_t _summary_bytes_used;
 216 
 217   void increase_used(size_t bytes);
 218   void decrease_used(size_t bytes);
 219 
 220   void set_used(size_t bytes);
 221 
 222   // Class that handles archive allocation ranges.
 223   G1ArchiveAllocator* _archive_allocator;
 224 



 225   // GC allocation statistics policy for survivors.
 226   G1EvacStats _survivor_evac_stats;
 227 
 228   // GC allocation statistics policy for tenured objects.
 229   G1EvacStats _old_evac_stats;
 230 
 231   // It specifies whether we should attempt to expand the heap after a
 232   // region allocation failure. If heap expansion fails we set this to
 233   // false so that we don't re-attempt the heap expansion (it's likely
 234   // that subsequent expansion attempts will also fail if one fails).
 235   // Currently, it is only consulted during GC and it's reset at the
 236   // start of each GC.
 237   bool _expand_heap_after_alloc_failure;
 238 
 239   // Helper for monitoring and management support.
 240   G1MonitoringSupport* _g1mm;
 241 
 242   // Records whether the region at the given index is (still) a
 243   // candidate for eager reclaim.  Only valid for humongous start
 244   // regions; other regions have unspecified values.  Humongous start


 257       return get_by_index(region);
 258     }
 259   };
 260 
 261   HumongousReclaimCandidates _humongous_reclaim_candidates;
 262   // Stores whether during humongous object registration we found candidate regions.
 263   // If not, we can skip a few steps.
 264   bool _has_humongous_reclaim_candidates;
 265 
 266   volatile uint _gc_time_stamp;
 267 
 268   G1HRPrinter _hr_printer;
 269 
 270   // It decides whether an explicit GC should start a concurrent cycle
 271   // instead of doing a STW GC. Currently, a concurrent cycle is
 272   // explicitly started if:
 273   // (a) cause == _gc_locker and +GCLockerInvokesConcurrent, or
 274   // (b) cause == _g1_humongous_allocation, or
 275   // (c) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent, or
 276   // (d) cause == _dcmd_gc_run and +ExplicitGCInvokesConcurrent, or
 277   // (e) cause == _wb_conc_mark.

 278   bool should_do_concurrent_full_gc(GCCause::Cause cause);
 279 
 280   // Indicates whether we are in young or mixed GC mode.
 281   G1CollectorState _collector_state;
 282 
 283   // Keeps track of how many "old marking cycles" (i.e., Full GCs or
 284   // concurrent cycles) we have started.
 285   volatile uint _old_marking_cycles_started;
 286 
 287   // Keeps track of how many "old marking cycles" (i.e., Full GCs or
 288   // concurrent cycles) we have completed.
 289   volatile uint _old_marking_cycles_completed;
 290 
 291   // This is a non-product method that is helpful for testing. It is
 292   // called at the end of a GC and artificially expands the heap by
 293   // allocating a number of dead regions. This way we can induce very
 294   // frequent marking cycles and stress the cleanup / concurrent
 295   // cleanup code more (as all the regions that will be allocated by
 296   // this method will be found dead by the marking cycle).
 297   void allocate_dummy_regions() PRODUCT_RETURN;


 559     return _verifier;
 560   }
 561 
 562   G1MonitoringSupport* g1mm() {
 563     assert(_g1mm != NULL, "should have been initialized");
 564     return _g1mm;
 565   }
 566 
 567   // Expand the garbage-first heap by at least the given size (in bytes!).
 568   // Returns true if the heap was expanded by the requested amount;
 569   // false otherwise.
 570   // (Rounds up to a HeapRegion boundary.)
 571   bool expand(size_t expand_bytes, WorkGang* pretouch_workers = NULL, double* expand_time_ms = NULL);
 572 
 573   // Returns the PLAB statistics for a given destination.
 574   inline G1EvacStats* alloc_buffer_stats(InCSetState dest);
 575 
 576   // Determines PLAB size for a given destination.
 577   inline size_t desired_plab_sz(InCSetState dest);
 578 


 579   // Do anything common to GCs.
 580   void gc_prologue(bool full);
 581   void gc_epilogue(bool full);
 582 
 583   // Modify the reclaim candidate set and test for presence.
 584   // These are only valid for starts_humongous regions.
 585   inline void set_humongous_reclaim_candidate(uint region, bool value);
 586   inline bool is_humongous_reclaim_candidate(uint region);
 587 
 588   // Remove from the reclaim candidate set.  Also remove from the
 589   // collection set so that later encounters avoid the slow path.
 590   inline void set_humongous_is_live(oop obj);
 591 
 592   // Register the given region to be part of the collection set.
 593   inline void register_humongous_region_with_cset(uint index);
 594   // Register regions with humongous objects (actually on the start region) in
 595   // the in_cset_fast_test table.
 596   void register_humongous_regions_with_cset();
 597   // We register a region with the fast "in collection set" test. We
 598   // simply set to true the array slot corresponding to this region.


1106   inline void old_set_add(HeapRegion* hr);
1107   inline void old_set_remove(HeapRegion* hr);
1108 
1109   size_t non_young_capacity_bytes() {
1110     return (_old_set.length() + _humongous_set.length()) * HeapRegion::GrainBytes;
1111   }
1112 
1113   void set_free_regions_coming();
1114   void reset_free_regions_coming();
1115   bool free_regions_coming() { return _free_regions_coming; }
1116   void wait_while_free_regions_coming();
1117 
1118   // Determine whether the given region is one that we are using as an
1119   // old GC alloc region.
1120   bool is_old_gc_alloc_region(HeapRegion* hr);
1121 
1122   // Perform a collection of the heap; intended for use in implementing
1123   // "System.gc".  This probably implies as full a collection as the
1124   // "CollectedHeap" supports.
1125   virtual void collect(GCCause::Cause cause);





1126 
1127   // True iff an evacuation has failed in the most-recent collection.
1128   bool evacuation_failed() { return _evacuation_failed; }
1129 
1130   void remove_from_old_sets(const uint old_regions_removed, const uint humongous_regions_removed);
1131   void prepend_to_freelist(FreeRegionList* list);
1132   void decrement_summary_bytes(size_t bytes);
1133 
1134   virtual bool is_in(const void* p) const;
1135 #ifdef ASSERT
1136   // Returns whether p is in one of the available areas of the heap. Slow but
1137   // extensive version.
1138   bool is_in_exact(const void* p) const;
1139 #endif
1140 
1141   // Return "TRUE" iff the given object address is within the collection
1142   // set. Assumes that the reference points into the heap.
1143   inline bool is_in_cset(const HeapRegion *hr);
1144   inline bool is_in_cset(oop obj);
1145   inline bool is_in_cset(HeapWord* addr);

