
src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp

rev 8125 : imported patch fix
rev 8127 : [mq]: inc4

----- old version -----

 203 
 204 private:
 205   // The one and only G1CollectedHeap, so static functions can find it.
 206   static G1CollectedHeap* _g1h;
 207 
 208   FlexibleWorkGang* _workers;
 209 
 210   static size_t _humongous_object_threshold_in_words;
 211 
 212   // The secondary free list which contains regions that have been
 213   // freed up during the cleanup process. This will be appended to
 214   // the master free list when appropriate.
 215   FreeRegionList _secondary_free_list;
 216 
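The hand-off to the master free list is outside this hunk. A minimal sketch of how it might be done, assuming the SecondaryFreeList_lock name and the FreeRegionList API used elsewhere in G1 (illustration only, not this patch's code):

    // Sketch only: drain the secondary free list into the master free
    // list, taking the lock only when there is actually work to do.
    void G1CollectedHeap::append_secondary_free_list_if_not_empty_with_lock() {
      if (!_secondary_free_list.is_empty()) {
        MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
        append_secondary_free_list();  // moves every region onto the master list
      }
    }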
 217   // It keeps track of the old regions.
 218   HeapRegionSet _old_set;
 219 
 220   // It keeps track of the humongous regions.
 221   HeapRegionSet _humongous_set;
 222 
 223   void clear_humongous_is_live_table();
 224   void eagerly_reclaim_humongous_regions();
 225 
 226   // The number of regions we could create by expansion.
 227   uint _expansion_regions;
 228 
 229   // The block offset table for the G1 heap.
 230   G1BlockOffsetSharedArray* _bot_shared;
 231 
 232   // Tears down the region sets / lists so that they are empty and the
 233   // regions on the heap do not belong to a region set / list. The
 234   // only exception is the humongous set which we leave unaltered. If
 235   // free_list_only is true, it will only tear down the master free
 236   // list. It is called before a Full GC (free_list_only == false) or
 237   // before heap shrinking (free_list_only == true).
 238   void tear_down_region_sets(bool free_list_only);
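The call sites are not in this hunk; for orientation, the pairing implied by the comment above is roughly:

    // Illustrative pairing only (assumed from the comment, not shown here):
    //   Full GC:        tear_down_region_sets(false); ... rebuild_region_sets(false);
    //   Heap shrinking: tear_down_region_sets(true);  ... rebuild_region_sets(true);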
 239 
 240   // Rebuilds the region sets / lists so that they are repopulated to
 241   // reflect the contents of the heap. The only exception is the
 242   // humongous set which was not torn down in the first place. If
 243   // free_list_only is true, it will only rebuild the master free


 273 
 274   // It resets the mutator alloc region before new allocations can take place.
 275   void init_mutator_alloc_region();
 276 
 277   // It releases the mutator alloc region.
 278   void release_mutator_alloc_region();
 279 
 280   // It initializes the GC alloc regions at the start of a GC.
 281   void init_gc_alloc_regions(EvacuationInfo& evacuation_info);
 282 
 283   // It releases the GC alloc regions at the end of a GC.
 284   void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info);
 285 
 286   // It does any cleanup that needs to be done on the GC alloc regions
 287   // before a Full GC.
 288   void abandon_gc_alloc_regions();
 289 
 290   // Helper for monitoring and management support.
 291   G1MonitoringSupport* _g1mm;
 292 
 293   // Records whether the region at the given index is kept live by roots or
 294   // references from the young generation.
 295   class HumongousIsLiveBiasedMappedArray : public G1BiasedMappedArray<bool> {



 296    protected:
 297     bool default_value() const { return false; }
 298    public:
 299     void clear() { G1BiasedMappedArray<bool>::clear(); }
 300     void set_live(uint region) {
 301       set_by_index(region, true);
 302     }
 303     bool is_live(uint region) {



 304       return get_by_index(region);
 305     }
 306   };
 307 
 308   HumongousIsLiveBiasedMappedArray _humongous_is_live;
 309   // Records whether we found any candidate regions during humongous object
 310   // registration. If not, we can skip a few steps.
 311   bool _has_humongous_reclaim_candidates;
 312 
 313   volatile unsigned _gc_time_stamp;
 314 
 315   size_t* _surviving_young_words;
 316 
 317   G1HRPrinter _hr_printer;
 318 
 319   void setup_surviving_young_words();
 320   void update_surviving_young_words(size_t* surv_young_words);
 321   void cleanup_surviving_young_words();
 322 
 323   // It decides whether an explicit GC should start a concurrent cycle
 324   // instead of doing a STW GC. Currently, a concurrent cycle is
 325   // explicitly started if:
 326   // (a) cause == _gc_locker and +GCLockerInvokesConcurrent,
 327   // (b) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent, or
 328   // (c) cause == _g1_humongous_allocation.
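The declaration this comment describes is elided from the hunk; a sketch of the predicate it implies, using the real GCCause values and flags but an assumed body:

    // Assumed body, matching the comment above case by case.
    bool should_do_concurrent_full_gc(GCCause::Cause cause) {
      switch (cause) {
        case GCCause::_gc_locker:               return GCLockerInvokesConcurrent;
        case GCCause::_java_lang_system_gc:     return ExplicitGCInvokesConcurrent;
        case GCCause::_g1_humongous_allocation: return true;
        default:                                return false;
      }
    }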


 626   }
 627 
 628   // Expand the garbage-first heap by at least the given size (in bytes!).
 629   // Returns true if the heap was expanded by the requested amount;
 630   // false otherwise.
 631   // (Rounds up to a HeapRegion boundary.)
 632   bool expand(size_t expand_bytes);
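A sketch of the rounding the comment promises (illustrative helper, not part of the file):

    // Round a byte request up to a whole number of heap regions.
    static uint regions_for_expansion(size_t expand_bytes) {
      size_t aligned_bytes = align_size_up(expand_bytes, HeapRegion::GrainBytes);
      return (uint)(aligned_bytes / HeapRegion::GrainBytes);
    }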
 633 
 634   // Returns the PLAB statistics for a given destination.
 635   inline PLABStats* alloc_buffer_stats(InCSetState dest);
 636 
 637   // Determines PLAB size for a given destination.
 638   inline size_t desired_plab_sz(InCSetState dest);
 639 
 640   inline AllocationContextStats& allocation_context_stats();
 641 
 642   // Do anything common to GCs.
 643   void gc_prologue(bool full);
 644   void gc_epilogue(bool full);
 645 
 646   inline void set_humongous_is_live(oop obj);




 647 
 648   bool humongous_is_live(uint region) {
 649     return _humongous_is_live.is_live(region);
 650   }
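set_humongous_is_live is declared inline just above; its definition lives in g1CollectedHeap.inline.hpp and is not part of this hunk. A minimal sketch, assuming the addr_to_region helper:

    // Assumed body: translate the oop to a region index and mark it live.
    inline void G1CollectedHeap::set_humongous_is_live(oop obj) {
      uint region = addr_to_region((HeapWord*)obj);
      // Skip the store when already live to avoid needless cache-line traffic.
      if (!_humongous_is_live.is_live(region)) {
        _humongous_is_live.set_live(region);
      }
    }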
 651 
 652   // Returns whether the given region (which must be a humongous (start) region)
 653   // is to be considered conservatively live regardless of any other conditions.
 654   bool humongous_region_is_always_live(uint index);
 655   // Returns whether the given region (which must be a humongous (start) region)
 656   // is considered a candidate for eager reclamation.
 657   bool humongous_region_is_candidate(uint index);
 658   // Register the given region to be part of the collection set.
 659   inline void register_humongous_region_with_cset(uint index);
 660   // Register regions with humongous objects (actually on the start region) in
 661   // the in_cset_fast_test table.
 662   void register_humongous_regions_with_cset();
 663   // We register a region with the fast "in collection set" test. We
 664   // simply set the array slot corresponding to this region to true.
 665   void register_young_region_with_cset(HeapRegion* r) {
 666     _in_cset_fast_test.set_in_young(r->hrm_index());
 667   }
 668   void register_old_region_with_cset(HeapRegion* r) {
 669     _in_cset_fast_test.set_in_old(r->hrm_index());
 670   }
 671   void clear_in_cset(const HeapRegion* hr) {
 672     _in_cset_fast_test.clear(hr);
 673   }
 674 
 675   void clear_cset_fast_test() {
 676     _in_cset_fast_test.clear();
 677   }
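For context, a hedged sketch of the fast-path consumer of _in_cset_fast_test; the is_in_cset name follows the surrounding code, but the body here is an assumption:

    // Assumed fast-path test: index the biased array by address, with no
    // HeapRegion dereference on the hot path.
    inline bool G1CollectedHeap::is_in_cset(oop obj) {
      return _in_cset_fast_test.is_in_cset((HeapWord*)obj);
    }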




----- new version -----

 203 
 204 private:
 205   // The one and only G1CollectedHeap, so static functions can find it.
 206   static G1CollectedHeap* _g1h;
 207 
 208   FlexibleWorkGang* _workers;
 209 
 210   static size_t _humongous_object_threshold_in_words;
 211 
 212   // The secondary free list which contains regions that have been
 213   // freed up during the cleanup process. This will be appended to
 214   // the master free list when appropriate.
 215   FreeRegionList _secondary_free_list;
 216 
 217   // It keeps track of the old regions.
 218   HeapRegionSet _old_set;
 219 
 220   // It keeps track of the humongous regions.
 221   HeapRegionSet _humongous_set;
 222 

 223   void eagerly_reclaim_humongous_regions();
 224 
 225   // The number of regions we could create by expansion.
 226   uint _expansion_regions;
 227 
 228   // The block offset table for the G1 heap.
 229   G1BlockOffsetSharedArray* _bot_shared;
 230 
 231   // Tears down the region sets / lists so that they are empty and the
 232   // regions on the heap do not belong to a region set / list. The
 233   // only exception is the humongous set which we leave unaltered. If
 234   // free_list_only is true, it will only tear down the master free
 235   // list. It is called before a Full GC (free_list_only == false) or
 236   // before heap shrinking (free_list_only == true).
 237   void tear_down_region_sets(bool free_list_only);
 238 
 239   // Rebuilds the region sets / lists so that they are repopulated to
 240   // reflect the contents of the heap. The only exception is the
 241   // humongous set which was not torn down in the first place. If
 242   // free_list_only is true, it will only rebuild the master free


 272 
 273   // It resets the mutator alloc region before new allocations can take place.
 274   void init_mutator_alloc_region();
 275 
 276   // It releases the mutator alloc region.
 277   void release_mutator_alloc_region();
 278 
 279   // It initializes the GC alloc regions at the start of a GC.
 280   void init_gc_alloc_regions(EvacuationInfo& evacuation_info);
 281 
 282   // It releases the GC alloc regions at the end of a GC.
 283   void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info);
 284 
 285   // It does any cleanup that needs to be done on the GC alloc regions
 286   // before a Full GC.
 287   void abandon_gc_alloc_regions();
 288 
 289   // Helper for monitoring and management support.
 290   G1MonitoringSupport* _g1mm;
 291 
 292   // Records whether the region at the given index is (still) a
 293   // candidate for eager reclaim.  Only valid for humongous start
 294   // regions; other regions have unspecified values.  Initialized at
 295   // start of collection pause, with candidates removed as they are
 296   // found reachable from roots or the young generation.
 297   class HumongousReclaimCandidates : public G1BiasedMappedArray<bool> {
 298    protected:
 299     bool default_value() const { return false; }
 300    public:
 301     void clear() { G1BiasedMappedArray<bool>::clear(); }
 302     void add_candidate(uint region) {
 303       set_by_index(region, true);
 304     }
 305     void remove_candidate(uint region) {
 306       set_by_index(region, false);
 307     }
 308     bool is_candidate(uint region) {
 309       return get_by_index(region);
 310     }
 311   };
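A hypothetical usage sketch of the new class (not from this patch): registration adds eligible starts_humongous regions, and later discovery of a root or young-gen reference removes them. note_candidate is an invented name:

    // Hypothetical member-function sketch, illustration only.
    void G1CollectedHeap::note_candidate(uint region) {
      if (humongous_region_is_candidate(region)) {
        _humongous_reclaim_candidates.add_candidate(region);
        _has_humongous_reclaim_candidates = true;
      }
    }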
 312 
 313   HumongousReclaimCandidates _humongous_reclaim_candidates;
 314   // Records whether we found any candidate regions during humongous object
 315   // registration. If not, we can skip a few steps.
 316   bool _has_humongous_reclaim_candidates;
 317 
 318   volatile unsigned _gc_time_stamp;
 319 
 320   size_t* _surviving_young_words;
 321 
 322   G1HRPrinter _hr_printer;
 323 
 324   void setup_surviving_young_words();
 325   void update_surviving_young_words(size_t* surv_young_words);
 326   void cleanup_surviving_young_words();
 327 
 328   // It decides whether an explicit GC should start a concurrent cycle
 329   // instead of doing a STW GC. Currently, a concurrent cycle is
 330   // explicitly started if:
 331   // (a) cause == _gc_locker and +GCLockerInvokesConcurrent,
 332   // (b) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent, or
 333   // (c) cause == _g1_humongous_allocation.


 631   }
 632 
 633   // Expand the garbage-first heap by at least the given size (in bytes!).
 634   // Returns true if the heap was expanded by the requested amount;
 635   // false otherwise.
 636   // (Rounds up to a HeapRegion boundary.)
 637   bool expand(size_t expand_bytes);
 638 
 639   // Returns the PLAB statistics for a given destination.
 640   inline PLABStats* alloc_buffer_stats(InCSetState dest);
 641 
 642   // Determines PLAB size for a given destination.
 643   inline size_t desired_plab_sz(InCSetState dest);
 644 
 645   inline AllocationContextStats& allocation_context_stats();
 646 
 647   // Do anything common to GCs.
 648   void gc_prologue(bool full);
 649   void gc_epilogue(bool full);
 650 
 651   // Modify the reclaim candidate set and test for presence.
 652   // These are only valid for starts_humongous regions.
 653   inline void add_humongous_reclaim_candidate(uint region);
 654   inline void remove_humongous_reclaim_candidate(uint region);
 655   inline bool is_humongous_reclaim_candidate(uint region);
 656 
 657   // Remove from the reclaim candidate set.  Also remove from the
 658   // collection set so that later encounters avoid the slow path.
 659   inline void set_humongous_is_live(oop obj);
 660 
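The matching inline definitions are not part of this hunk; assumed bodies that simply forward to the candidates array:

    // Assumed bodies for the new inlines (g1CollectedHeap.inline.hpp).
    inline void G1CollectedHeap::add_humongous_reclaim_candidate(uint region) {
      assert(region_at(region)->is_starts_humongous(), "pre-condition");
      _humongous_reclaim_candidates.add_candidate(region);
    }
    inline void G1CollectedHeap::remove_humongous_reclaim_candidate(uint region) {
      _humongous_reclaim_candidates.remove_candidate(region);
    }
    inline bool G1CollectedHeap::is_humongous_reclaim_candidate(uint region) {
      return _humongous_reclaim_candidates.is_candidate(region);
    }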






 661   // Register the given region to be part of the collection set.
 662   inline void register_humongous_region_with_cset(uint index);
 663   // Register regions with humongous objects (actually on the start region) in
 664   // the in_cset_fast_test table.
 665   void register_humongous_regions_with_cset();
 666   // We register a region with the fast "in collection set" test. We
 667   // simply set the array slot corresponding to this region to true.
 668   void register_young_region_with_cset(HeapRegion* r) {
 669     _in_cset_fast_test.set_in_young(r->hrm_index());
 670   }
 671   void register_old_region_with_cset(HeapRegion* r) {
 672     _in_cset_fast_test.set_in_old(r->hrm_index());
 673   }
 674   void clear_in_cset(const HeapRegion* hr) {
 675     _in_cset_fast_test.clear(hr);
 676   }
 677 
 678   void clear_cset_fast_test() {
 679     _in_cset_fast_test.clear();
 680   }

