
src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp

rev 7323 : 8069367: Eagerly reclaimed humongous objects left on mark stack
Summary: Prevent eager reclaim of objects that might be on mark stack.
Reviewed-by: brutisso, tschatzl

Old version:

 216   // Testing classes.
 217   friend class G1CheckCSetFastTableClosure;
 218 
 219 private:
 220   // The one and only G1CollectedHeap, so static functions can find it.
 221   static G1CollectedHeap* _g1h;
 222 
 223   static size_t _humongous_object_threshold_in_words;
 224 
 225   // The secondary free list which contains regions that have been
 226   // freed up during the cleanup process. This will be appended to
 227   // the master free list when appropriate.
 228   FreeRegionList _secondary_free_list;
 229 
 230   // It keeps track of the old regions.
 231   HeapRegionSet _old_set;
 232 
 233   // It keeps track of the humongous regions.
 234   HeapRegionSet _humongous_set;
 235 
 236   void clear_humongous_is_live_table();
 237   void eagerly_reclaim_humongous_regions();
 238 
 239   // The number of regions we could create by expansion.
 240   uint _expansion_regions;
 241 
 242   // The block offset table for the G1 heap.
 243   G1BlockOffsetSharedArray* _bot_shared;
 244 
 245   // Tears down the region sets / lists so that they are empty and the
 246   // regions on the heap do not belong to a region set / list. The
 247   // only exception is the humongous set which we leave unaltered. If
 248   // free_list_only is true, it will only tear down the master free
 249   // list. It is called before a Full GC (free_list_only == false) or
 250   // before heap shrinking (free_list_only == true).
 251   void tear_down_region_sets(bool free_list_only);
 252 
 253   // Rebuilds the region sets / lists so that they are repopulated to
 254   // reflect the contents of the heap. The only exception is the
 255   // humongous set which was not torn down in the first place. If
 256   // free_list_only is true, it will only rebuild the master free


 286 
 287   // It resets the mutator alloc region before new allocations can take place.
 288   void init_mutator_alloc_region();
 289 
 290   // It releases the mutator alloc region.
 291   void release_mutator_alloc_region();
 292 
 293   // It initializes the GC alloc regions at the start of a GC.
 294   void init_gc_alloc_regions(EvacuationInfo& evacuation_info);
 295 
 296   // It releases the GC alloc regions at the end of a GC.
 297   void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info);
 298 
 299   // It does any cleanup that needs to be done on the GC alloc regions
 300   // before a Full GC.
 301   void abandon_gc_alloc_regions();
 302 
 303   // Helper for monitoring and management support.
 304   G1MonitoringSupport* _g1mm;
 305 
 306   // Records whether the region at the given index is kept live by roots or
 307   // references from the young generation.
 308   class HumongousIsLiveBiasedMappedArray : public G1BiasedMappedArray<bool> {
 309    protected:
 310     bool default_value() const { return false; }
 311    public:
 312     void clear() { G1BiasedMappedArray<bool>::clear(); }
 313     void set_live(uint region) {
 314       set_by_index(region, true);
 315     }
 316     bool is_live(uint region) {
 317       return get_by_index(region);
 318     }
 319   };
 320 
 321   HumongousIsLiveBiasedMappedArray _humongous_is_live;
 322   // Records whether any candidate regions were found during humongous
 323   // object registration. If not, we can skip a few steps.
 324   bool _has_humongous_reclaim_candidates;
 325 
 326   volatile unsigned _gc_time_stamp;
 327 
 328   size_t* _surviving_young_words;
 329 
 330   G1HRPrinter _hr_printer;
 331 
 332   void setup_surviving_young_words();
 333   void update_surviving_young_words(size_t* surv_young_words);
 334   void cleanup_surviving_young_words();
 335 
 336   // It decides whether an explicit GC should start a concurrent cycle
 337   // instead of doing a STW GC. Currently, a concurrent cycle is
 338   // explicitly started if:
 339   // (a) cause == _gc_locker and +GCLockerInvokesConcurrent, or
 340   // (b) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent, or
 341   // (c) cause == _g1_humongous_allocation.


 638   }
 639 
 640   // Expand the garbage-first heap by at least the given size (in bytes!).
 641   // Returns true if the heap was expanded by the requested amount;
 642   // false otherwise.
 643   // (Rounds up to a HeapRegion boundary.)
 644   bool expand(size_t expand_bytes);
 645 
 646   // Returns the PLAB statistics for a given destination.
 647   inline PLABStats* alloc_buffer_stats(InCSetState dest);
 648 
 649   // Determines PLAB size for a given destination.
 650   inline size_t desired_plab_sz(InCSetState dest);
 651 
 652   inline AllocationContextStats& allocation_context_stats();
 653 
 654   // Do anything common to GCs.
 655   virtual void gc_prologue(bool full);
 656   virtual void gc_epilogue(bool full);
 657 
 658   inline void set_humongous_is_live(oop obj);
 659 
 660   bool humongous_is_live(uint region) {
 661     return _humongous_is_live.is_live(region);
 662   }
 663 
 664   // Returns whether the given region (which must be a humongous (start) region)
 665   // is to be considered conservatively live regardless of any other conditions.
 666   bool humongous_region_is_always_live(uint index);
 667   // Returns whether the given region (which must be a humongous (start) region)
 668   // is considered a candidate for eager reclamation.
 669   bool humongous_region_is_candidate(uint index);
 670   // Register the given region to be part of the collection set.
 671   inline void register_humongous_region_with_in_cset_fast_test(uint index);
 672   // Register regions containing humongous objects (specifically, their start
 673   // regions) in the in_cset_fast_test table.
 674   void register_humongous_regions_with_in_cset_fast_test();
 675   // We register a region with the fast "in collection set" test. We
 676   // simply set to true the array slot corresponding to this region.
 677   void register_young_region_with_in_cset_fast_test(HeapRegion* r) {
 678     _in_cset_fast_test.set_in_young(r->hrm_index());
 679   }
 680   void register_old_region_with_in_cset_fast_test(HeapRegion* r) {
 681     _in_cset_fast_test.set_in_old(r->hrm_index());
 682   }
 683 
 684   // This is a fast test of whether a reference points into the
 685   // collection set or not. Assumes that the reference
 686   // points into the heap.
 687   inline bool in_cset_fast_test(oop obj);
 688 
 689   void clear_cset_fast_test() {

New version:

 216   // Testing classes.
 217   friend class G1CheckCSetFastTableClosure;
 218 
 219 private:
 220   // The one and only G1CollectedHeap, so static functions can find it.
 221   static G1CollectedHeap* _g1h;
 222 
 223   static size_t _humongous_object_threshold_in_words;
 224 
 225   // The secondary free list which contains regions that have been
 226   // freed up during the cleanup process. This will be appended to
 227   // the master free list when appropriate.
 228   FreeRegionList _secondary_free_list;
 229 
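The append step described in the comment above can be pictured as a standard list splice. A minimal standalone analogue (std::list stands in for FreeRegionList here; that substitution is an assumption, not the VM's API):

    #include <list>

    // Move every region from the secondary list onto the master free
    // list in O(1), leaving the secondary list empty -- the "appended
    // to the master free list when appropriate" step.
    void append_secondary_to_master(std::list<int>& master,
                                    std::list<int>& secondary) {
      master.splice(master.end(), secondary);
    }
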
 230   // It keeps track of the old regions.
 231   HeapRegionSet _old_set;
 232 
 233   // It keeps track of the humongous regions.
 234   HeapRegionSet _humongous_set;
 235 
 236   void eagerly_reclaim_humongous_regions();
 237 
 238   // The number of regions we could create by expansion.
 239   uint _expansion_regions;
 240 
 241   // The block offset table for the G1 heap.
 242   G1BlockOffsetSharedArray* _bot_shared;
 243 
 244   // Tears down the region sets / lists so that they are empty and the
 245   // regions on the heap do not belong to a region set / list. The
 246   // only exception is the humongous set which we leave unaltered. If
 247   // free_list_only is true, it will only tear down the master free
 248   // list. It is called before a Full GC (free_list_only == false) or
 249   // before heap shrinking (free_list_only == true).
 250   void tear_down_region_sets(bool free_list_only);
 251 
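The pairing implied by this declaration and its comment can be sketched as follows (call sites are paraphrased from the comment, not quoted from the VM; rebuild_region_sets is the counterpart documented just below):

    // Full GC:        tear_down_region_sets(false /* free_list_only */);
    //                 ... collect ...
    //                 rebuild_region_sets(false /* free_list_only */);
    // Heap shrinking: tear_down_region_sets(true  /* free_list_only */);
    //                 ... shrink ...
    //                 rebuild_region_sets(true  /* free_list_only */);
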
 252   // Rebuilds the region sets / lists so that they are repopulated to
 253   // reflect the contents of the heap. The only exception is the
 254   // humongous set which was not torn down in the first place. If
 255   // free_list_only is true, it will only rebuild the master free


 285 
 286   // It resets the mutator alloc region before new allocations can take place.
 287   void init_mutator_alloc_region();
 288 
 289   // It releases the mutator alloc region.
 290   void release_mutator_alloc_region();
 291 
 292   // It initializes the GC alloc regions at the start of a GC.
 293   void init_gc_alloc_regions(EvacuationInfo& evacuation_info);
 294 
 295   // It releases the GC alloc regions at the end of a GC.
 296   void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info);
 297 
 298   // It does any cleanup that needs to be done on the GC alloc regions
 299   // before a Full GC.
 300   void abandon_gc_alloc_regions();
 301 
 302   // Helper for monitoring and management support.
 303   G1MonitoringSupport* _g1mm;
 304 
 305   // Records whether the region at the given index is (still) a
 306   // candidate for eager reclaim.  Only valid for humongous start
 307   // regions; other regions have unspecified values.  Humongous start
 308   // regions are initialized at the start of a collection pause, with
 309   // candidates removed from the set as they are found reachable from
 310   // roots or the young generation.
 311   class HumongousReclaimCandidates : public G1BiasedMappedArray<bool> {
 312    protected:
 313     bool default_value() const { return false; }
 314    public:
 315     void clear() { G1BiasedMappedArray<bool>::clear(); }
 316     void set_candidate(uint region, bool value) {
 317       set_by_index(region, value);
 318     }
 319     bool is_candidate(uint region) {
 320       return get_by_index(region);
 321     }
 322   };
 323 
 324   HumongousReclaimCandidates _humongous_reclaim_candidates;
 325   // Records whether any candidate regions were found during humongous
 326   // object registration. If not, we can skip a few steps.
 327   bool _has_humongous_reclaim_candidates;
 328 
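A self-contained sketch of the candidate table's behavior, with std::vector<bool> standing in for G1BiasedMappedArray<bool> (the vector, its sizing, and indexing from region 0 are simplifying assumptions):

    #include <cstdint>
    #include <vector>

    class ReclaimCandidatesSketch {
      std::vector<bool> _bits;  // one candidate flag per heap region
     public:
      explicit ReclaimCandidatesSketch(uint32_t num_regions)
        : _bits(num_regions, false) {}  // matches default_value() == false
      void clear() { _bits.assign(_bits.size(), false); }
      void set_candidate(uint32_t region, bool value) { _bits[region] = value; }
      bool is_candidate(uint32_t region) const { return _bits[region]; }
    };

A pause would flag candidate starts_humongous regions up front and then call set_candidate(region, false) as regions are found reachable from roots or the young generation.
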
 329   volatile unsigned _gc_time_stamp;
 330 
 331   size_t* _surviving_young_words;
 332 
 333   G1HRPrinter _hr_printer;
 334 
 335   void setup_surviving_young_words();
 336   void update_surviving_young_words(size_t* surv_young_words);
 337   void cleanup_surviving_young_words();
 338 
 339   // It decides whether an explicit GC should start a concurrent cycle
 340   // instead of doing a STW GC. Currently, a concurrent cycle is
 341   // explicitly started if:
 342   // (a) cause == _gc_locker and +GCLockerInvokesConcurrent, or
 343   // (b) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent, or
 344   // (c) cause == _g1_humongous_allocation.
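As a hedged sketch, that decision reads roughly as the predicate below (the GCCause values and -XX flag names are the real ones; the function shape is a paraphrase, not a quotation of the VM source):

    bool should_start_concurrent_cycle(GCCause::Cause cause) {
      switch (cause) {
        case GCCause::_gc_locker:               return GCLockerInvokesConcurrent;   // (a)
        case GCCause::_java_lang_system_gc:     return ExplicitGCInvokesConcurrent; // (b)
        case GCCause::_g1_humongous_allocation: return true;                        // (c)
        default:                                return false;
      }
    }
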


 641   }
 642 
 643   // Expand the garbage-first heap by at least the given size (in bytes!).
 644   // Returns true if the heap was expanded by the requested amount;
 645   // false otherwise.
 646   // (Rounds up to a HeapRegion boundary.)
 647   bool expand(size_t expand_bytes);
 648 
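The rounding mentioned above is plain align-up arithmetic. A standalone sketch (region_size stands in for the VM's HeapRegion size constant, an assumption here):

    #include <cstddef>

    // Round a byte count up to the next multiple of the region size, so
    // the heap grows by whole HeapRegions (hence "at least" above).
    size_t align_up_to_region(size_t expand_bytes, size_t region_size) {
      return ((expand_bytes + region_size - 1) / region_size) * region_size;
    }

For example, with 1 MB regions a 1.5 MB request expands the heap by 2 MB.
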
 649   // Returns the PLAB statistics for a given destination.
 650   inline PLABStats* alloc_buffer_stats(InCSetState dest);
 651 
 652   // Determines PLAB size for a given destination.
 653   inline size_t desired_plab_sz(InCSetState dest);
 654 
 655   inline AllocationContextStats& allocation_context_stats();
 656 
 657   // Do anything common to GCs.
 658   virtual void gc_prologue(bool full);
 659   virtual void gc_epilogue(bool full);
 660 
 661   // Modify the reclaim candidate set and test for presence.
 662   // These are only valid for starts_humongous regions.
 663   inline void set_humongous_reclaim_candidate(uint region, bool value);
 664   inline bool is_humongous_reclaim_candidate(uint region);
 665 
 666   // Remove from the reclaim candidate set.  Also remove from the
 667   // collection set so that later encounters avoid the slow path.
 668   inline void set_humongous_is_live(oop obj);
 669 
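Per the two comment lines above, a hedged paraphrase of what the call does; the helper names below (other than set_humongous_reclaim_candidate) are illustrative, not quoted VM code:

    // inline void G1CollectedHeap::set_humongous_is_live(oop obj) {
    //   uint region = region_index_of(obj);              // illustrative helper
    //   set_humongous_reclaim_candidate(region, false);  // drop from candidates
    //   remove_from_cset_fast_test(region);              // later refs skip slow path
    // }
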
 670   // Register the given region to be part of the collection set.
 671   inline void register_humongous_region_with_in_cset_fast_test(uint index);
 672   // Register regions containing humongous objects (specifically, their start
 673   // regions) in the in_cset_fast_test table.
 674   void register_humongous_regions_with_in_cset_fast_test();
 675   // We register a region with the fast "in collection set" test. We
 676   // simply set to true the array slot corresponding to this region.
 677   void register_young_region_with_in_cset_fast_test(HeapRegion* r) {
 678     _in_cset_fast_test.set_in_young(r->hrm_index());
 679   }
 680   void register_old_region_with_in_cset_fast_test(HeapRegion* r) {
 681     _in_cset_fast_test.set_in_old(r->hrm_index());
 682   }
 683 
 684   // This is a fast test of whether a reference points into the
 685   // collection set or not. Assumes that the reference
 686   // points into the heap.
 687   inline bool in_cset_fast_test(oop obj);
 688 
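A self-contained sketch of the fast-test idea: one state entry per fixed-size region, indexed by shifting a heap-relative address (the enum, shift, and vector are simplifying assumptions in place of the VM's biased array):

    #include <cstdint>
    #include <vector>

    enum class InCSet : uint8_t { None, Young, Old };

    struct FastCSetTestSketch {
      std::vector<InCSet> _state;  // one entry per heap region
      uint32_t _region_shift;      // log2(region size in bytes)

      // O(1): map a reference's offset within the heap to its region.
      bool in_cset(uintptr_t heap_offset) const {
        return _state[heap_offset >> _region_shift] != InCSet::None;
      }
      void set_in_young(uint32_t region) { _state[region] = InCSet::Young; }
      void set_in_old(uint32_t region)   { _state[region] = InCSet::Old; }
    };
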
 689   void clear_cset_fast_test() {