
src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
rev 7993 : [mq]: fix

Old version (before the fix):


 199   // Testing classes.
 200   friend class G1CheckCSetFastTableClosure;
 201 
 202 private:
 203   // The one and only G1CollectedHeap, so static functions can find it.
 204   static G1CollectedHeap* _g1h;
 205 
 206   static size_t _humongous_object_threshold_in_words;
 207 
 208   // The secondary free list which contains regions that have been
 209   // freed up during the cleanup process. This will be appended to
 210   // the master free list when appropriate.
 211   FreeRegionList _secondary_free_list;
 212 
 213   // It keeps track of the old regions.
 214   HeapRegionSet _old_set;
 215 
 216   // It keeps track of the humongous regions.
 217   HeapRegionSet _humongous_set;
 218 
 219   void clear_humongous_is_live_table();
 220   void eagerly_reclaim_humongous_regions();
 221 
 222   // The number of regions we could create by expansion.
 223   uint _expansion_regions;
 224 
 225   // The block offset table for the G1 heap.
 226   G1BlockOffsetSharedArray* _bot_shared;
 227 
 228   // Tears down the region sets / lists so that they are empty and the
 229   // regions on the heap do not belong to a region set / list. The
 230   // only exception is the humongous set which we leave unaltered. If
 231   // free_list_only is true, it will only tear down the master free
 232   // list. It is called before a Full GC (free_list_only == false) or
 233   // before heap shrinking (free_list_only == true).
 234   void tear_down_region_sets(bool free_list_only);
 235 
 236   // Rebuilds the region sets / lists so that they are repopulated to
 237   // reflect the contents of the heap. The only exception is the
 238   // humongous set which was not torn down in the first place. If
 239   // free_list_only is true, it will only rebuild the master free


 269 
 270   // It resets the mutator alloc region before new allocations can take place.
 271   void init_mutator_alloc_region();
 272 
 273   // It releases the mutator alloc region.
 274   void release_mutator_alloc_region();
 275 
 276   // It initializes the GC alloc regions at the start of a GC.
 277   void init_gc_alloc_regions(EvacuationInfo& evacuation_info);
 278 
 279   // It releases the GC alloc regions at the end of a GC.
 280   void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info);
 281 
 282   // It does any cleanup that needs to be done on the GC alloc regions
 283   // before a Full GC.
 284   void abandon_gc_alloc_regions();
 285 
 286   // Helper for monitoring and management support.
 287   G1MonitoringSupport* _g1mm;
 288 
 289   // Records whether the region at the given index is kept live by roots or
 290   // references from the young generation.
 291   class HumongousIsLiveBiasedMappedArray : public G1BiasedMappedArray<bool> {
 292    protected:
 293     bool default_value() const { return false; }
 294    public:
 295     void clear() { G1BiasedMappedArray<bool>::clear(); }
 296     void set_live(uint region) {
 297       set_by_index(region, true);
 298     }
 299     bool is_live(uint region) {
 300       return get_by_index(region);
 301     }
 302   };
 303 
 304   HumongousIsLiveBiasedMappedArray _humongous_is_live;
 305   // Stores whether, during humongous object registration, we found candidate regions.
 306   // If not, we can skip a few steps.
 307   bool _has_humongous_reclaim_candidates;
 308 
 309   volatile unsigned _gc_time_stamp;
 310 
 311   size_t* _surviving_young_words;
 312 
 313   G1HRPrinter _hr_printer;
 314 
 315   void setup_surviving_young_words();
 316   void update_surviving_young_words(size_t* surv_young_words);
 317   void cleanup_surviving_young_words();
 318 
 319   // It decides whether an explicit GC should start a concurrent cycle
 320   // instead of doing a STW GC. Currently, a concurrent cycle is
 321   // explicitly started if:
 322   // (a) cause == _gc_locker and +GCLockerInvokesConcurrent, or
 323   // (b) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent, or
 324   // (c) cause == _g1_humongous_allocation.


 617   // Expand the garbage-first heap by at least the given size (in bytes!).
 618   // Returns true if the heap was expanded by the requested amount;
 619   // false otherwise.
 620   // (Rounds up to a HeapRegion boundary.)
 621   bool expand(size_t expand_bytes);
 622 
 623   // Returns the PLAB statistics for a given destination.
 624   inline PLABStats* alloc_buffer_stats(InCSetState dest);
 625 
 626   // Determines PLAB size for a given destination.
 627   inline size_t desired_plab_sz(InCSetState dest);
 628 
 629   inline AllocationContextStats& allocation_context_stats();
 630 
 631   // Do anything common to GCs.
 632   virtual void gc_prologue(bool full);
 633   virtual void gc_epilogue(bool full);
 634 
 635   inline void set_humongous_is_live(oop obj);
 636 
 637   bool humongous_is_live(uint region) {
 638     return _humongous_is_live.is_live(region);
 639   }
 640 
 641   // Returns whether the given region (which must be a humongous (start) region)
 642   // is to be considered conservatively live regardless of any other conditions.
 643   bool humongous_region_is_always_live(uint index);
 644   // Returns whether the given region (which must be a humongous (start) region)
 645   // is considered a candidate for eager reclamation.
 646   bool humongous_region_is_candidate(uint index);
 647   // Register the given region to be part of the collection set.
 648   inline void register_humongous_region_with_cset(uint index);
 649   // Register regions with humongous objects (more precisely, their start
 650   // regions) in the in_cset_fast_test table.
 651   void register_humongous_regions_with_cset();
 652   // We register a region with the fast "in collection set" test. We
 653   // simply set to true the array slot corresponding to this region.
 654   void register_young_region_with_cset(HeapRegion* r) {
 655     _in_cset_fast_test.set_in_young(r->hrm_index());
 656   }
 657   void register_old_region_with_cset(HeapRegion* r) {
 658     _in_cset_fast_test.set_in_old(r->hrm_index());
 659   }
 660   void clear_in_cset(const HeapRegion* hr) {
 661     _in_cset_fast_test.clear(hr);
 662   }
 663 
 664   void clear_cset_fast_test() {
 665     _in_cset_fast_test.clear();
 666   }


New version (after the fix):

 199   // Testing classes.
 200   friend class G1CheckCSetFastTableClosure;
 201 
 202 private:
 203   // The one and only G1CollectedHeap, so static functions can find it.
 204   static G1CollectedHeap* _g1h;
 205 
 206   static size_t _humongous_object_threshold_in_words;
 207 
 208   // The secondary free list which contains regions that have been
 209   // freed up during the cleanup process. This will be appended to
 210   // the master free list when appropriate.
 211   FreeRegionList _secondary_free_list;
 212 
 213   // It keeps track of the old regions.
 214   HeapRegionSet _old_set;
 215 
 216   // It keeps track of the humongous regions.
 217   HeapRegionSet _humongous_set;
 218 

 219   void eagerly_reclaim_humongous_regions();
 220 
 221   // The number of regions we could create by expansion.
 222   uint _expansion_regions;
 223 
 224   // The block offset table for the G1 heap.
 225   G1BlockOffsetSharedArray* _bot_shared;
 226 
 227   // Tears down the region sets / lists so that they are empty and the
 228   // regions on the heap do not belong to a region set / list. The
 229   // only exception is the humongous set which we leave unaltered. If
 230   // free_list_only is true, it will only tear down the master free
 231   // list. It is called before a Full GC (free_list_only == false) or
 232   // before heap shrinking (free_list_only == true).
 233   void tear_down_region_sets(bool free_list_only);
 234 
 235   // Rebuilds the region sets / lists so that they are repopulated to
 236   // reflect the contents of the heap. The only exception is the
 237   // humongous set which was not torn down in the first place. If
 238   // free_list_only is true, it will only rebuild the master free
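
The tear-down / rebuild pair above brackets the two operations named in the comment: a Full GC (free_list_only == false) and heap shrinking (free_list_only == true). A minimal standalone sketch of that bracketing follows; the stub bodies and call sites are illustrative assumptions, not the actual HotSpot code paths.

    // Hypothetical, simplified call sites; not the real G1 code.
    #include <cstdio>

    static void tear_down_region_sets(bool free_list_only) {
      std::printf("tear down (free_list_only=%s)\n", free_list_only ? "true" : "false");
    }
    static void rebuild_region_sets(bool free_list_only) {
      std::printf("rebuild   (free_list_only=%s)\n", free_list_only ? "true" : "false");
    }

    static void full_collection_sketch() {
      tear_down_region_sets(false /* free_list_only */);  // empty all sets/lists
      // ... compact the heap ...
      rebuild_region_sets(false /* free_list_only */);    // repopulate from heap contents
    }

    static void shrink_sketch() {
      tear_down_region_sets(true /* free_list_only */);   // only the master free list
      // ... uncommit regions at the end of the heap ...
      rebuild_region_sets(true /* free_list_only */);
    }

    int main() { full_collection_sketch(); shrink_sketch(); return 0; }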


 268 
 269   // It resets the mutator alloc region before new allocations can take place.
 270   void init_mutator_alloc_region();
 271 
 272   // It releases the mutator alloc region.
 273   void release_mutator_alloc_region();
 274 
 275   // It initializes the GC alloc regions at the start of a GC.
 276   void init_gc_alloc_regions(EvacuationInfo& evacuation_info);
 277 
 278   // It releases the GC alloc regions at the end of a GC.
 279   void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info);
 280 
 281   // It does any cleanup that needs to be done on the GC alloc regions
 282   // before a Full GC.
 283   void abandon_gc_alloc_regions();
 284 
 285   // Helper for monitoring and management support.
 286   G1MonitoringSupport* _g1mm;
 287 
 288   // Records whether the region at the given index is (still) a
 289   // candidate for eager reclaim.  Only valid for humongous start
 290   // regions; other regions have unspecified values.  Initialized at
 291   // start of collection pause, with candidates removed as they are
 292   // found reachable from roots or the young generation.
 293   class HumongousReclaimCandidates : public G1BiasedMappedArray<bool> {
 294    protected:
 295     bool default_value() const { return false; }
 296    public:
 297     void clear() { G1BiasedMappedArray<bool>::clear(); }
 298     void add_candidate(uint region) {
 299       set_by_index(region, true);
 300     }
 301     void remove_candidate(uint region) {
 302       set_by_index(region, false);
 303     }
 304     bool is_candidate(uint region) {
 305       return get_by_index(region);
 306     }
 307   };
 308 
 309   HumongousReclaimCandidates _humongous_reclaim_candidates;
 310   // Stores whether, during humongous object registration, we found candidate regions.
 311   // If not, we can skip a few steps.
 312   bool _has_humongous_reclaim_candidates;
 313 
 314   volatile unsigned _gc_time_stamp;
 315 
 316   size_t* _surviving_young_words;
 317 
 318   G1HRPrinter _hr_printer;
 319 
 320   void setup_surviving_young_words();
 321   void update_surviving_young_words(size_t* surv_young_words);
 322   void cleanup_surviving_young_words();
 323 
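For the three surviving_young_words helpers above, the sketch below is a guess at the aggregation pattern in self-contained C++: one running total per young region, with each GC worker's per-region counts summed in. The class name and update signature are hypothetical; only the accumulate-per-region idea comes from the declarations above.

    #include <cstdio>
    #include <vector>

    struct SurvivingYoungWordsSketch {
      std::vector<size_t> _words;  // one running total per young region
      explicit SurvivingYoungWordsSketch(size_t young_len) : _words(young_len, 0) {}
      void update(const size_t* per_worker) {  // called once per GC worker
        for (size_t i = 0; i < _words.size(); i++) _words[i] += per_worker[i];
      }
    };

    int main() {
      SurvivingYoungWordsSketch s(3);
      size_t w0[3] = {10, 0, 5};
      size_t w1[3] = {2, 7, 0};
      s.update(w0);
      s.update(w1);
      std::printf("%zu %zu %zu\n", s._words[0], s._words[1], s._words[2]);  // 12 7 5
      return 0;
    }
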
 324   // It decides whether an explicit GC should start a concurrent cycle
 325   // instead of doing a STW GC. Currently, a concurrent cycle is
 326   // explicitly started if:
 327   // (a) cause == _gc_locker and +GCLockerInvokesConcurrent, or
 328   // (b) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent, or
 329   // (c) cause == _g1_humongous_allocation.
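
The three cases in the comment above translate directly into a small predicate. The sketch below restates just that comment in self-contained form; the plain bools stand in for the -XX:+GCLockerInvokesConcurrent and -XX:+ExplicitGCInvokesConcurrent flags, and the enum is a cut-down stand-in for HotSpot's GCCause values.

    #include <cstdio>

    enum GCCause { _gc_locker, _java_lang_system_gc, _g1_humongous_allocation, _allocation_failure };

    static bool GCLockerInvokesConcurrent   = true;  // assumed flag values
    static bool ExplicitGCInvokesConcurrent = true;

    static bool should_start_concurrent_cycle(GCCause cause) {
      switch (cause) {
        case _gc_locker:               return GCLockerInvokesConcurrent;    // case (a)
        case _java_lang_system_gc:     return ExplicitGCInvokesConcurrent;  // case (b)
        case _g1_humongous_allocation: return true;                         // case (c)
        default:                       return false;  // otherwise do a STW GC
      }
    }

    int main() {
      std::printf("%d\n", should_start_concurrent_cycle(_java_lang_system_gc));  // 1
      std::printf("%d\n", should_start_concurrent_cycle(_allocation_failure));   // 0
      return 0;
    }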


 622   // Expand the garbage-first heap by at least the given size (in bytes!).
 623   // Returns true if the heap was expanded by the requested amount;
 624   // false otherwise.
 625   // (Rounds up to a HeapRegion boundary.)
 626   bool expand(size_t expand_bytes);
 627 
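Since expand() is documented to round the request up to a HeapRegion boundary, a quick arithmetic illustration may help. The 1 MB region size below is an assumption (in real G1 the region size is chosen at startup); only the round-up formula is the point.

    #include <cstddef>
    #include <cstdio>

    static const size_t kRegionSize = 1024 * 1024;  // assumed region size, not HotSpot's value

    static size_t round_up_to_region_boundary(size_t bytes) {
      return (bytes + kRegionSize - 1) / kRegionSize * kRegionSize;
    }

    int main() {
      std::printf("%zu\n", round_up_to_region_boundary(1));                // 1048576
      std::printf("%zu\n", round_up_to_region_boundary(kRegionSize));      // 1048576
      std::printf("%zu\n", round_up_to_region_boundary(kRegionSize + 1));  // 2097152
      return 0;
    }
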
 628   // Returns the PLAB statistics for a given destination.
 629   inline PLABStats* alloc_buffer_stats(InCSetState dest);
 630 
 631   // Determines PLAB size for a given destination.
 632   inline size_t desired_plab_sz(InCSetState dest);
 633 
 634   inline AllocationContextStats& allocation_context_stats();
 635 
 636   // Do anything common to GCs.
 637   virtual void gc_prologue(bool full);
 638   virtual void gc_epilogue(bool full);
 639 
 640   inline void set_humongous_is_live(oop obj);
 641 
 642   void add_humongous_reclaim_candidate(uint region) {
 643     assert(_hrm.at(region)->is_starts_humongous(), "Must start a humongous object");
 644     _humongous_reclaim_candidates.add_candidate(region);
 645   }
 646 
 647   void remove_humongous_reclaim_candidate(uint region) {
 648     assert(_hrm.at(region)->is_starts_humongous(), "Must start a humongous object");
 649     _humongous_reclaim_candidates.remove_candidate(region);
 650   }
 651 
 652   bool is_humongous_reclaim_candidate(uint region) {
 653     assert(_hrm.at(region)->is_starts_humongous(), "Must start a humongous object");
 654     return _humongous_reclaim_candidates.is_candidate(region);
 655   }
 656 
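To make the candidate lifecycle concrete, here is a toy, self-contained version of the table plus the accessors above: cleared at the start of a pause, a humongous start region is added as a candidate during registration, then removed once a reference from the roots or the young generation is found into it. std::vector<bool> stands in for G1BiasedMappedArray<bool>; none of this is HotSpot code.

    #include <cstdio>
    #include <vector>

    class HumongousReclaimCandidatesSketch {
      std::vector<bool> _bits;  // default_value() == false
     public:
      explicit HumongousReclaimCandidatesSketch(unsigned max_regions) : _bits(max_regions, false) {}
      void clear() { _bits.assign(_bits.size(), false); }
      void add_candidate(unsigned region)      { _bits[region] = true;  }
      void remove_candidate(unsigned region)   { _bits[region] = false; }
      bool is_candidate(unsigned region) const { return _bits[region]; }
    };

    int main() {
      HumongousReclaimCandidatesSketch c(2048);
      c.clear();                                 // start of collection pause
      c.add_candidate(17);                       // humongous start region 17 looks reclaimable
      c.remove_candidate(17);                    // a root/young-gen reference into it was found
      std::printf("%d\n", c.is_candidate(17));   // 0: no longer a candidate
      return 0;
    }
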
 657   // Register the given region to be part of the collection set.
 658   inline void register_humongous_region_with_cset(uint index);
 659   // Register regions with humongous objects (more precisely, their start
 660   // regions) in the in_cset_fast_test table.
 661   void register_humongous_regions_with_cset();
 662   // We register a region with the fast "in collection set" test. We
 663   // simply set to true the array slot corresponding to this region.
 664   void register_young_region_with_cset(HeapRegion* r) {
 665     _in_cset_fast_test.set_in_young(r->hrm_index());
 666   }
 667   void register_old_region_with_cset(HeapRegion* r) {
 668     _in_cset_fast_test.set_in_old(r->hrm_index());
 669   }
 670   void clear_in_cset(const HeapRegion* hr) {
 671     _in_cset_fast_test.clear(hr);
 672   }
 673 
 674   void clear_cset_fast_test() {
 675     _in_cset_fast_test.clear();
 676   }
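
Finally, a toy model of the fast "in collection set" test that the register_*_region_with_cset methods above write into: one state entry per region, so hot paths can classify a region with a single array lookup. Real G1 keeps richer per-region state (including humongous markers) in this table; the sketch keeps only young / old / not-in-cset.

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    enum class InCSet : uint8_t { NotInCSet, Young, Old };

    struct InCSetFastTestSketch {
      std::vector<InCSet> _states;  // one entry per region
      explicit InCSetFastTestSketch(unsigned max_regions) : _states(max_regions, InCSet::NotInCSet) {}
      void set_in_young(unsigned region) { _states[region] = InCSet::Young; }
      void set_in_old(unsigned region)   { _states[region] = InCSet::Old;   }
      void clear(unsigned region)        { _states[region] = InCSet::NotInCSet; }
      void clear_all() { _states.assign(_states.size(), InCSet::NotInCSet); }
      bool in_cset(unsigned region) const { return _states[region] != InCSet::NotInCSet; }
    };

    int main() {
      InCSetFastTestSketch t(2048);
      t.set_in_young(3);
      t.set_in_old(42);
      std::printf("%d %d %d\n", t.in_cset(3), t.in_cset(42), t.in_cset(7));  // 1 1 0
      return 0;
    }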

