src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp

Print this page
rev 6323 : 8027553: Change the in_cset_fast_test functionality to use the G1BiasedArray abstraction
Summary: Instead of using a manually managed array for the in_cset_fast_test array, use a G1BiasedArray instance.
Reviewed-by: brutisso, mgerdin


  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
  26 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
  27 
  28 #include "gc_implementation/g1/concurrentMark.hpp"
  29 #include "gc_implementation/g1/evacuationInfo.hpp"
  30 #include "gc_implementation/g1/g1AllocRegion.hpp"

  31 #include "gc_implementation/g1/g1HRPrinter.hpp"
  32 #include "gc_implementation/g1/g1MonitoringSupport.hpp"
  33 #include "gc_implementation/g1/g1RemSet.hpp"
  34 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
  35 #include "gc_implementation/g1/g1YCTypes.hpp"
  36 #include "gc_implementation/g1/heapRegionSeq.hpp"
  37 #include "gc_implementation/g1/heapRegionSet.hpp"
  38 #include "gc_implementation/shared/hSpaceCounters.hpp"
  39 #include "gc_implementation/shared/parGCAllocBuffer.hpp"
  40 #include "memory/barrierSet.hpp"
  41 #include "memory/memRegion.hpp"
  42 #include "memory/sharedHeap.hpp"
  43 #include "utilities/stack.hpp"
  44 
  45 // A "G1CollectedHeap" is an implementation of a java heap for HotSpot.
  46 // It uses the "Garbage First" heap organization and algorithm, which
  47 // may combine concurrent marking with parallel, incremental compaction of
  48 // heap subsets that will yield large amounts of garbage.
  49 
  50 // Forward declarations


 180 protected:
 181   virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
 182   virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
 183 public:
 184   OldGCAllocRegion()
 185   : G1AllocRegion("Old GC Alloc Region", true /* bot_updates */) { }
 186 };
 187 
 188 // The G1 STW is alive closure.
 189 // An instance is embedded into the G1CH and used as the
 190 // (optional) _is_alive_non_header closure in the STW
 191 // reference processor. It is also extensively used during
 192 // reference processing during STW evacuation pauses.
  193 class G1STWIsAliveClosure: public BoolObjectClosure {
  194   G1CollectedHeap* _g1;  // Backing heap; borrowed, not owned by this closure.
  195 public:
  196   G1STWIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
  197   bool do_object_b(oop p);  // True iff p is live during STW reference processing (see class comment above); defined in the .cpp.
  198 };
 199 










 200 class RefineCardTableEntryClosure;
 201 
 202 class G1CollectedHeap : public SharedHeap {
 203   friend class VM_G1CollectForAllocation;
 204   friend class VM_G1CollectFull;
 205   friend class VM_G1IncCollectionPause;
 206   friend class VMStructs;
 207   friend class MutatorAllocRegion;
 208   friend class SurvivorGCAllocRegion;
 209   friend class OldGCAllocRegion;
 210 
 211   // Closures used in implementation.
 212   template <G1Barrier barrier, bool do_mark_object>
 213   friend class G1ParCopyClosure;
 214   friend class G1IsAliveClosure;
 215   friend class G1EvacuateFollowersClosure;
 216   friend class G1ParScanThreadState;
 217   friend class G1ParScanClosureSuper;
 218   friend class G1ParEvacuateFollowersClosure;
 219   friend class G1ParTask;


 336   // It initializes the GC alloc regions at the start of a GC.
 337   void init_gc_alloc_regions(EvacuationInfo& evacuation_info);
 338 
 339   // It releases the GC alloc regions at the end of a GC.
 340   void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info);
 341 
 342   // It does any cleanup that needs to be done on the GC alloc regions
 343   // before a Full GC.
 344   void abandon_gc_alloc_regions();
 345 
 346   // Helper for monitoring and management support.
 347   G1MonitoringSupport* _g1mm;
 348 
 349   // Determines PLAB size for a particular allocation purpose.
 350   size_t desired_plab_sz(GCAllocPurpose purpose);
 351 
 352   // Outside of GC pauses, the number of bytes used in all regions other
 353   // than the current allocation region.
 354   size_t _summary_bytes_used;
 355 
 356   // This is used for a quick test on whether a reference points into
 357   // the collection set or not. Basically, we have an array, with one
 358   // byte per region, and that byte denotes whether the corresponding
  359   // region is in the collection set or not. The entry corresponding to
 360   // the bottom of the heap, i.e., region 0, is pointed to by
 361   // _in_cset_fast_test_base.  The _in_cset_fast_test field has been
 362   // biased so that it actually points to address 0 of the address
 363   // space, to make the test as fast as possible (we can simply shift
 364   // the address to address into it, instead of having to subtract the
 365   // bottom of the heap from the address before shifting it; basically
 366   // it works in the same way the card table works).
 367   bool* _in_cset_fast_test;
 368 
 369   // The allocated array used for the fast test on whether a reference
 370   // points into the collection set or not. This field is also used to
 371   // free the array.
 372   bool* _in_cset_fast_test_base;
 373 
 374   // The length of the _in_cset_fast_test_base array.
 375   uint _in_cset_fast_test_length;
 376 
 377   volatile unsigned _gc_time_stamp;
 378 
 379   size_t* _surviving_young_words;
 380 
 381   G1HRPrinter _hr_printer;
 382 
 383   void setup_surviving_young_words();
 384   void update_surviving_young_words(size_t* surv_young_words);
 385   void cleanup_surviving_young_words();
 386 
 387   // It decides whether an explicit GC should start a concurrent cycle
 388   // instead of doing a STW GC. Currently, a concurrent cycle is
 389   // explicitly started if:
 390   // (a) cause == _gc_locker and +GCLockerInvokesConcurrent, or
  391   // (b) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent, or
 392   // (c) cause == _g1_humongous_allocation
 393   bool should_do_concurrent_full_gc(GCCause::Cause cause);
 394 
 395   // Keeps track of how many "old marking cycles" (i.e., Full GCs or


 678 public:
 679 
  680   G1MonitoringSupport* g1mm() {  // Accessor for the monitoring and management support helper (_g1mm).
  681     assert(_g1mm != NULL, "should have been initialized");  // _g1mm must be set up before first use.
  682     return _g1mm;
  683   }
 684 
 685   // Expand the garbage-first heap by at least the given size (in bytes!).
 686   // Returns true if the heap was expanded by the requested amount;
 687   // false otherwise.
 688   // (Rounds up to a HeapRegion boundary.)
 689   bool expand(size_t expand_bytes);
 690 
 691   // Do anything common to GC's.
 692   virtual void gc_prologue(bool full);
 693   virtual void gc_epilogue(bool full);
 694 
 695   // We register a region with the fast "in collection set" test. We
 696   // simply set to true the array slot corresponding to this region.
  697   void register_region_with_in_cset_fast_test(HeapRegion* r) {  // Mark region r as in-cset in the fast-test array; r must already be in the collection set.
  698     assert(_in_cset_fast_test_base != NULL, "sanity");
  699     assert(r->in_collection_set(), "invariant");
  700     uint index = r->hrs_index();  // Region index doubles as the array slot (one bool per region).
  701     assert(index < _in_cset_fast_test_length, "invariant");
  702     assert(!_in_cset_fast_test_base[index], "invariant");  // Each region may be registered at most once per cset.
  703     _in_cset_fast_test_base[index] = true;
  704   }
 705 
 706   // This is a fast test on whether a reference points into the
 707   // collection set or not. Assume that the reference
 708   // points into the heap.
 709   inline bool in_cset_fast_test(oop obj);
 710 
  711   void clear_cset_fast_test() {  // Reset every slot of the fast in-cset test array to false.
  712     assert(_in_cset_fast_test_base != NULL, "sanity");
  713     memset(_in_cset_fast_test_base, false,
  714            (size_t) _in_cset_fast_test_length * sizeof(bool));  // false converts to byte 0, so memset is safe here.
  715   }
 716 
 717   // This is called at the start of either a concurrent cycle or a Full
 718   // GC to update the number of old marking cycles started.
 719   void increment_old_marking_cycles_started();
 720 
 721   // This is called at the end of either a concurrent cycle or a Full
 722   // GC to update the number of old marking cycles completed. Those two
 723   // can happen in a nested fashion, i.e., we start a concurrent
 724   // cycle, a Full GC happens half-way through it which ends first,
 725   // and then the cycle notices that a Full GC happened and ends
 726   // too. The concurrent parameter is a boolean to help us do a bit
 727   // tighter consistency checking in the method. If concurrent is
 728   // false, the caller is the inner caller in the nesting (i.e., the
 729   // Full GC). If concurrent is true, the caller is the outer caller
 730   // in this nesting (i.e., the concurrent cycle). Further nesting is
 731   // not currently supported. The end of this call also notifies
 732   // the FullGCCount_lock in case a Java thread is waiting for a full
 733   // GC to happen (e.g., it called System.gc() with
 734   // +ExplicitGCInvokesConcurrent).




  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
  26 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
  27 
  28 #include "gc_implementation/g1/concurrentMark.hpp"
  29 #include "gc_implementation/g1/evacuationInfo.hpp"
  30 #include "gc_implementation/g1/g1AllocRegion.hpp"
  31 #include "gc_implementation/g1/g1BiasedArray.hpp"
  32 #include "gc_implementation/g1/g1HRPrinter.hpp"
  33 #include "gc_implementation/g1/g1MonitoringSupport.hpp"
  34 #include "gc_implementation/g1/g1RemSet.hpp"
  35 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
  36 #include "gc_implementation/g1/g1YCTypes.hpp"
  37 #include "gc_implementation/g1/heapRegionSeq.hpp"
  38 #include "gc_implementation/g1/heapRegionSet.hpp"
  39 #include "gc_implementation/shared/hSpaceCounters.hpp"
  40 #include "gc_implementation/shared/parGCAllocBuffer.hpp"
  41 #include "memory/barrierSet.hpp"
  42 #include "memory/memRegion.hpp"
  43 #include "memory/sharedHeap.hpp"
  44 #include "utilities/stack.hpp"
  45 
  46 // A "G1CollectedHeap" is an implementation of a java heap for HotSpot.
  47 // It uses the "Garbage First" heap organization and algorithm, which
  48 // may combine concurrent marking with parallel, incremental compaction of
  49 // heap subsets that will yield large amounts of garbage.
  50 
  51 // Forward declarations


 181 protected:
 182   virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
 183   virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
 184 public:
 185   OldGCAllocRegion()
 186   : G1AllocRegion("Old GC Alloc Region", true /* bot_updates */) { }
 187 };
 188 
 189 // The G1 STW is alive closure.
 190 // An instance is embedded into the G1CH and used as the
 191 // (optional) _is_alive_non_header closure in the STW
 192 // reference processor. It is also extensively used during
 193 // reference processing during STW evacuation pauses.
  194 class G1STWIsAliveClosure: public BoolObjectClosure {
  195   G1CollectedHeap* _g1;  // Backing heap; borrowed, not owned by this closure.
  196 public:
  197   G1STWIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
  198   bool do_object_b(oop p);  // True iff p is live during STW reference processing (see class comment above); defined in the .cpp.
  199 };
 200 
 201 // Instances of this class are used for quick tests on whether a reference points
 202 // into the collection set. Each of the array's elements denotes whether the
 203 // corresponding region is in the collection set.
  204 class G1FastCSetBiasedMappedArray : public G1BiasedMappedArray<bool> {
  205  protected:
  206   bool default_value() const { return false; }  // Initial/cleared value for every slot: not in the collection set.
  207  public:
  208   void clear() { G1BiasedMappedArray<bool>::clear(); }  // Reset all entries to default_value() — presumably per the G1BiasedMappedArray contract; confirm in g1BiasedArray.hpp.
  209 };
 210 
 211 class RefineCardTableEntryClosure;
 212 
 213 class G1CollectedHeap : public SharedHeap {
 214   friend class VM_G1CollectForAllocation;
 215   friend class VM_G1CollectFull;
 216   friend class VM_G1IncCollectionPause;
 217   friend class VMStructs;
 218   friend class MutatorAllocRegion;
 219   friend class SurvivorGCAllocRegion;
 220   friend class OldGCAllocRegion;
 221 
 222   // Closures used in implementation.
 223   template <G1Barrier barrier, bool do_mark_object>
 224   friend class G1ParCopyClosure;
 225   friend class G1IsAliveClosure;
 226   friend class G1EvacuateFollowersClosure;
 227   friend class G1ParScanThreadState;
 228   friend class G1ParScanClosureSuper;
 229   friend class G1ParEvacuateFollowersClosure;
 230   friend class G1ParTask;


 347   // It initializes the GC alloc regions at the start of a GC.
 348   void init_gc_alloc_regions(EvacuationInfo& evacuation_info);
 349 
 350   // It releases the GC alloc regions at the end of a GC.
 351   void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info);
 352 
 353   // It does any cleanup that needs to be done on the GC alloc regions
 354   // before a Full GC.
 355   void abandon_gc_alloc_regions();
 356 
 357   // Helper for monitoring and management support.
 358   G1MonitoringSupport* _g1mm;
 359 
 360   // Determines PLAB size for a particular allocation purpose.
 361   size_t desired_plab_sz(GCAllocPurpose purpose);
 362 
 363   // Outside of GC pauses, the number of bytes used in all regions other
 364   // than the current allocation region.
 365   size_t _summary_bytes_used;
 366 
 367   // This array is used for a quick test on whether a reference points into
 368   // the collection set or not. Each of the array's elements denotes whether the
 369   // corresponding region is in the collection set or not.
 370   G1FastCSetBiasedMappedArray _in_cset_fast_test;
















 371 
 372   volatile unsigned _gc_time_stamp;
 373 
 374   size_t* _surviving_young_words;
 375 
 376   G1HRPrinter _hr_printer;
 377 
 378   void setup_surviving_young_words();
 379   void update_surviving_young_words(size_t* surv_young_words);
 380   void cleanup_surviving_young_words();
 381 
 382   // It decides whether an explicit GC should start a concurrent cycle
 383   // instead of doing a STW GC. Currently, a concurrent cycle is
 384   // explicitly started if:
 385   // (a) cause == _gc_locker and +GCLockerInvokesConcurrent, or
  386   // (b) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent, or
 387   // (c) cause == _g1_humongous_allocation
 388   bool should_do_concurrent_full_gc(GCCause::Cause cause);
 389 
 390   // Keeps track of how many "old marking cycles" (i.e., Full GCs or


 673 public:
 674 
  675   G1MonitoringSupport* g1mm() {  // Accessor for the monitoring and management support helper (_g1mm).
  676     assert(_g1mm != NULL, "should have been initialized");  // _g1mm must be set up before first use.
  677     return _g1mm;
  678   }
 679 
 680   // Expand the garbage-first heap by at least the given size (in bytes!).
 681   // Returns true if the heap was expanded by the requested amount;
 682   // false otherwise.
 683   // (Rounds up to a HeapRegion boundary.)
 684   bool expand(size_t expand_bytes);
 685 
 686   // Do anything common to GC's.
 687   virtual void gc_prologue(bool full);
 688   virtual void gc_epilogue(bool full);
 689 
 690   // We register a region with the fast "in collection set" test. We
 691   // simply set to true the array slot corresponding to this region.
  692   void register_region_with_in_cset_fast_test(HeapRegion* r) {  // Mark region r as in-cset via the biased-array fast test; index is r's region number.
  693     _in_cset_fast_test.set_by_index(r->hrs_index(), true);  // NOTE(review): the pre-change asserts (r->in_collection_set(), not-yet-set) are gone — presumably set_by_index checks bounds; confirm in g1BiasedArray.hpp.




  694   }
 695 
 696   // This is a fast test on whether a reference points into the
 697   // collection set or not. Assume that the reference
 698   // points into the heap.
 699   inline bool in_cset_fast_test(oop obj);
 700 
  701   void clear_cset_fast_test() {  // Reset every slot of the fast in-cset test array to its default (false).
  702     _in_cset_fast_test.clear();


  703   }
 704 
 705   // This is called at the start of either a concurrent cycle or a Full
 706   // GC to update the number of old marking cycles started.
 707   void increment_old_marking_cycles_started();
 708 
 709   // This is called at the end of either a concurrent cycle or a Full
 710   // GC to update the number of old marking cycles completed. Those two
 711   // can happen in a nested fashion, i.e., we start a concurrent
 712   // cycle, a Full GC happens half-way through it which ends first,
 713   // and then the cycle notices that a Full GC happened and ends
 714   // too. The concurrent parameter is a boolean to help us do a bit
 715   // tighter consistency checking in the method. If concurrent is
 716   // false, the caller is the inner caller in the nesting (i.e., the
 717   // Full GC). If concurrent is true, the caller is the outer caller
 718   // in this nesting (i.e., the concurrent cycle). Further nesting is
 719   // not currently supported. The end of this call also notifies
 720   // the FullGCCount_lock in case a Java thread is waiting for a full
 721   // GC to happen (e.g., it called System.gc() with
 722   // +ExplicitGCInvokesConcurrent).