
src/hotspot/share/gc/g1/g1CollectedHeap.hpp

rev 56323 : imported patch 8220310.mut.0
rev 56324 : imported patch 8220310.mut.1_thomas
rev 56326 : [mq]: 8220310.mut.1-3_kim


old/src/hotspot/share/gc/g1/g1CollectedHeap.hpp:

  23  */
  24 
  25 #ifndef SHARE_GC_G1_G1COLLECTEDHEAP_HPP
  26 #define SHARE_GC_G1_G1COLLECTEDHEAP_HPP
  27 
  28 #include "gc/g1/g1BarrierSet.hpp"
  29 #include "gc/g1/g1BiasedArray.hpp"
  30 #include "gc/g1/g1CardTable.hpp"
  31 #include "gc/g1/g1CollectionSet.hpp"
  32 #include "gc/g1/g1CollectorState.hpp"
  33 #include "gc/g1/g1ConcurrentMark.hpp"
  34 #include "gc/g1/g1EdenRegions.hpp"
  35 #include "gc/g1/g1EvacFailure.hpp"
  36 #include "gc/g1/g1EvacStats.hpp"
  37 #include "gc/g1/g1EvacuationInfo.hpp"
  38 #include "gc/g1/g1GCPhaseTimes.hpp"
  39 #include "gc/g1/g1HeapTransition.hpp"
  40 #include "gc/g1/g1HeapVerifier.hpp"
  41 #include "gc/g1/g1HRPrinter.hpp"
  42 #include "gc/g1/g1HeapRegionAttr.hpp"

  43 #include "gc/g1/g1MonitoringSupport.hpp"
  44 #include "gc/g1/g1RedirtyCardsQueue.hpp"
  45 #include "gc/g1/g1SurvivorRegions.hpp"
  46 #include "gc/g1/g1YCTypes.hpp"
  47 #include "gc/g1/heapRegionManager.hpp"
  48 #include "gc/g1/heapRegionSet.hpp"
  49 #include "gc/g1/heterogeneousHeapRegionManager.hpp"
  50 #include "gc/shared/barrierSet.hpp"
  51 #include "gc/shared/collectedHeap.hpp"
  52 #include "gc/shared/gcHeapSummary.hpp"
  53 #include "gc/shared/plab.hpp"
  54 #include "gc/shared/preservedMarks.hpp"
  55 #include "gc/shared/softRefPolicy.hpp"
  56 #include "memory/memRegion.hpp"
  57 #include "utilities/stack.hpp"
  58 
  59 // A "G1CollectedHeap" is an implementation of a Java heap for HotSpot.
  60 // It uses the "Garbage First" heap organization and algorithm, which
  61 // may combine concurrent marking with parallel, incremental compaction of
  62 // heap subsets that will yield large amounts of garbage.


 175 
 176   // Tears down the region sets / lists so that they are empty and the
 177   // regions on the heap do not belong to a region set / list. The
 178   // only exception is the humongous set, which we leave unaltered. If
 179   // free_list_only is true, it will only tear down the master free
 180   // list. It is called before a Full GC (free_list_only == false) or
 181   // before heap shrinking (free_list_only == true).
 182   void tear_down_region_sets(bool free_list_only);
 183 
 184   // Rebuilds the region sets / lists so that they are repopulated to
 185   // reflect the contents of the heap. The only exception is the
 186   // humongous set, which was not torn down in the first place. If
 187   // free_list_only is true, it will only rebuild the master free
 188   // list. It is called after a Full GC (free_list_only == false) or
 189   // after heap shrinking (free_list_only == true).
 190   void rebuild_region_sets(bool free_list_only);
 191 
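A minimal standalone sketch of how the two methods above bracket the operations
that invalidate the region sets, assuming hypothetical stand-ins for the G1
internals; none of the names below are part of this file:

    // Hypothetical model of the tear-down / rebuild pairing described above.
    struct RegionSetsModel {
      bool torn_down = false;
      void tear_down_region_sets(bool free_list_only) { torn_down = true;  /* ... */ }
      void rebuild_region_sets(bool free_list_only)   { torn_down = false; /* ... */ }
    };

    void full_gc_sketch(RegionSetsModel& sets) {
      sets.tear_down_region_sets(false);  // before a Full GC: tear down all sets
      // ... compact the heap ...
      sets.rebuild_region_sets(false);    // after the Full GC: repopulate all sets
    }

    void shrink_heap_sketch(RegionSetsModel& sets) {
      sets.tear_down_region_sets(true);   // before shrinking: master free list only
      // ... uncommit empty regions ...
      sets.rebuild_region_sets(true);
    }
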
 192   // Callback for region mapping changed events.
 193   G1RegionMappingChangedListener _listener;
 194 



 195   // The sequence of all heap regions in the heap.
 196   HeapRegionManager* _hrm;
 197 
 199   // Manages all allocations within regions except for humongous object allocations.
 199   G1Allocator* _allocator;
 200 
 201   // Manages all heap verification.
 202   G1HeapVerifier* _verifier;
 203 
 204   // Outside of GC pauses, the number of bytes used in all regions other
 205   // than the current allocation region(s).
 206   volatile size_t _summary_bytes_used;
 207 
 208   void increase_used(size_t bytes);
 209   void decrease_used(size_t bytes);
 210 
 211   void set_used(size_t bytes);
 212 
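Per the comment above, _summary_bytes_used excludes the current allocation
region(s); their bytes are folded in through increase_used() when a region is
retired. A toy, single-threaded model of that accounting (the real field is
volatile and updated only via the helpers above):

    #include <cstddef>

    // Toy accounting model: count bytes in retired regions only; the
    // active alloc region is added to the total when it is retired.
    static size_t summary_bytes_used = 0;

    void retire_alloc_region_sketch(size_t allocated_bytes) {
      summary_bytes_used += allocated_bytes;   // increase_used(bytes)
    }

    void free_region_sketch(size_t used_bytes) {
      summary_bytes_used -= used_bytes;        // decrease_used(bytes)
    }
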
 213   // Class that handles archive allocation ranges.
 214   G1ArchiveAllocator* _archive_allocator;


 371 
 372   // The young region lists (eden and survivor).
 373   G1EdenRegions _eden;
 374   G1SurvivorRegions _survivor;
 375 
 376   STWGCTimer* _gc_timer_stw;
 377 
 378   G1NewTracer* _gc_tracer_stw;
 379 
 380   // The current policy object for the collector.
 381   G1Policy* _policy;
 382   G1HeapSizingPolicy* _heap_sizing_policy;
 383 
 384   G1CollectionSet _collection_set;
 385 
 386   // Try to allocate a single non-humongous HeapRegion sufficient for
 387   // an allocation of the given word_size. If do_expand is true,
 388   // attempt to expand the heap if necessary to satisfy the allocation
 389   // request. 'type' specifies the type of region to allocate. (Use constants
 390   // Old, Eden, Humongous, Survivor defined in HeapRegionType.)
 391   HeapRegion* new_region(size_t word_size, HeapRegionType type, bool do_expand);



 392 
 393   // Initialize a contiguous set of free regions of length num_regions
 394   // starting at index first so that they appear as a single
 395   // humongous region.
 396   HeapWord* humongous_obj_allocate_initialize_regions(uint first,
 397                                                       uint num_regions,
 398                                                       size_t word_size);
 399 
 400   // Attempt to allocate a humongous object of the given size. Return
 401   // NULL if unsuccessful.
 402   HeapWord* humongous_obj_allocate(size_t word_size);
 403 
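A humongous object spans ceil(object bytes / region size) contiguous regions,
which is the num_regions passed to the initializer above. A self-contained
sketch of that sizing computation; the word size and 4M region size are
illustrative constants, not values read from this file:

    #include <cstddef>
    #include <cstdio>

    // Illustrative constants; the real values come from HeapRegion and globals.
    const size_t kHeapWordSize    = 8;                 // bytes per word, 64-bit
    const size_t kRegionSizeBytes = 4 * 1024 * 1024;   // 4M regions

    // Number of contiguous regions needed for word_size words: the
    // "num_regions" handed to humongous_obj_allocate_initialize_regions().
    size_t regions_for_humongous(size_t word_size) {
      size_t byte_size = word_size * kHeapWordSize;
      return (byte_size + kRegionSizeBytes - 1) / kRegionSizeBytes;  // ceiling
    }

    int main() {
      // An 8M object needs two 4M regions.
      printf("%zu\n", regions_for_humongous(1024 * 1024));
    }
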
 404   // The following two methods, allocate_new_tlab() and
 405   // mem_allocate(), are the two main entry points from the runtime
 406   // into G1's allocation routines. They make the following
 407   // assumptions:
 408   //
 409   // * They should both be called outside safepoints.
 410   //
 411   // * They should both be called without holding the Heap_lock.


 446 
 447   // Second-level mutator allocation attempt: take the Heap_lock and
 448   // retry the allocation attempt, potentially scheduling a GC
 449   // pause. This should only be used for non-humongous allocations.
 450   HeapWord* attempt_allocation_slow(size_t word_size);
 451 
 452   // Takes the Heap_lock and attempts a humongous allocation. It can
 453   // potentially schedule a GC pause.
 454   HeapWord* attempt_allocation_humongous(size_t word_size);
 455 
 456   // Allocation attempt that should be called during safepoints (e.g.,
 457   // at the end of a successful GC). expect_null_mutator_alloc_region
 458   // specifies whether the mutator alloc region is expected to be NULL
 459   // or not.
 460   HeapWord* attempt_allocation_at_safepoint(size_t word_size,
 461                                             bool expect_null_mutator_alloc_region);
 462 
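attempt_allocation_slow() and attempt_allocation_at_safepoint() sit on the
slow-path ladder behind the two entry points above: try without the lock, then
under the Heap_lock, then schedule a pause and retry at the safepoint. A
simplified, stubbed model of that control flow; none of these helpers exist in
the real code, and locking and retry loops are elided:

    #include <cstddef>

    // Each stubbed stage returning nullptr means "fall through to the
    // next, more expensive stage". Purely a control-flow illustration.
    void* attempt_fast(size_t words)            { return nullptr; }
    void* attempt_under_heap_lock(size_t words) { return nullptr; }  // ~attempt_allocation_slow
    bool  schedule_gc_pause()                   { return true; }
    void* attempt_at_safepoint(size_t words)    { return nullptr; }  // ~attempt_allocation_at_safepoint

    void* mem_allocate_sketch(size_t words) {
      if (void* p = attempt_fast(words))            return p;  // lock-free fast path
      if (void* p = attempt_under_heap_lock(words)) return p;  // second-level attempt
      if (schedule_gc_pause()) {
        return attempt_at_safepoint(words);  // retry at the end of the pause
      }
      return nullptr;  // e.g. GC locker active
    }
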
 463   // These methods are the "callbacks" from the G1AllocRegion class.
 464 
 465   // For mutator alloc regions.
 466   HeapRegion* new_mutator_alloc_region(size_t word_size, bool force);
 467   void retire_mutator_alloc_region(HeapRegion* alloc_region,
 468                                    size_t allocated_bytes);
 469 
 470   // For GC alloc regions.
 471   bool has_more_regions(G1HeapRegionAttr dest);
 472   HeapRegion* new_gc_alloc_region(size_t word_size, G1HeapRegionAttr dest);
 473   void retire_gc_alloc_region(HeapRegion* alloc_region,
 474                               size_t allocated_bytes, G1HeapRegionAttr dest);
 475 
 476   // - if explicit_gc is true, the GC is for a System.gc() or similar,
 477   //   otherwise it's for a failed allocation.
 478   // - if clear_all_soft_refs is true, all soft references should be
 479   //   cleared during the GC.
 480   // - it returns false if it is unable to do the collection due to the
 481   //   GC locker being active, true otherwise.
 482   bool do_full_collection(bool explicit_gc,
 483                           bool clear_all_soft_refs);
 484 
 485   // Callback from VM_G1CollectFull operation, or collect_as_vm_thread.
 486   virtual void do_full_collection(bool clear_all_soft_refs);


 531 
 532   // Runs the given AbstractGangTask with the current active workers, returning the
 533   // total time taken.
 534   Tickspan run_task(AbstractGangTask* task);
 535 
 536   G1Allocator* allocator() {
 537     return _allocator;
 538   }
 539 
 540   G1HeapVerifier* verifier() {
 541     return _verifier;
 542   }
 543 
 544   G1MonitoringSupport* g1mm() {
 545     assert(_g1mm != NULL, "should have been initialized");
 546     return _g1mm;
 547   }
 548 
 549   void resize_heap_if_necessary();
 550 


 551   // Expand the garbage-first heap by at least the given size (in bytes!).
 552   // Returns true if the heap was expanded by the requested amount;
 553   // false otherwise.
 554   // (Rounds up to a HeapRegion boundary.)
 555   bool expand(size_t expand_bytes, WorkGang* pretouch_workers = NULL, double* expand_time_ms = NULL);
 556 
 557   // Returns the PLAB statistics for a given destination.
 558   inline G1EvacStats* alloc_buffer_stats(G1HeapRegionAttr dest);
 559 
 560   // Determines PLAB size for a given destination.
 561   inline size_t desired_plab_sz(G1HeapRegionAttr dest);
 562 
 563   // Do work common to all GCs.
 564   void gc_prologue(bool full);
 565   void gc_epilogue(bool full);
 566 
 567   // Does the given region fulfill the remembered-set-based eager reclaim candidate requirements?
 568   bool is_potential_eager_reclaim_candidate(HeapRegion* r) const;
 569 
 570   // Modify the reclaim candidate set and test for presence.
 571   // These are only valid for starts_humongous regions.
 572   inline void set_humongous_reclaim_candidate(uint region, bool value);
 573   inline bool is_humongous_reclaim_candidate(uint region);
 574 
 575   // Remove from the reclaim candidate set.  Also remove from the


 911   // Supplying a value for the _is_alive_non_header field is
 912   // optional but doing so prevents unnecessary additions to
 913   // the discovered lists during reference discovery.
 914   G1STWIsAliveClosure _is_alive_closure_stw;
 915 
 916   G1STWSubjectToDiscoveryClosure _is_subject_to_discovery_stw;
 917 
 918   // The (concurrent marking) reference processor...
 919   ReferenceProcessor* _ref_processor_cm;
 920 
 921   // Instance of the concurrent mark is_alive closure for embedding
 922   // into the Concurrent Marking reference processor as the
 923   // _is_alive_non_header field. Supplying a value for the
 924   // _is_alive_non_header field is optional but doing so prevents
 925   // unnecessary additions to the discovered lists during reference
 926   // discovery.
 927   G1CMIsAliveClosure _is_alive_closure_cm;
 928 
 929   G1CMSubjectToDiscoveryClosure _is_subject_to_discovery_cm;
 930 public:
 931 
 932   RefToScanQueue *task_queue(uint i) const;
 933 
 934   uint num_task_queues() const;
 935 
 936   // Create a G1CollectedHeap.
 937   // Must call the initialize method afterwards.
 938   // May not return if something goes wrong.
 939   G1CollectedHeap();
 940 
 941 private:
 942   jint initialize_concurrent_refinement();
 943   jint initialize_young_gen_sampling_thread();
 944 public:
 945   // Initialize the G1CollectedHeap to have the initial and
 946   // maximum sizes and remembered and barrier sets
 947   // specified by the policy object.
 948   jint initialize();
 949 
 950   virtual void stop();
 951   virtual void safepoint_synchronize_begin();




new/src/hotspot/share/gc/g1/g1CollectedHeap.hpp:

  23  */
  24 
  25 #ifndef SHARE_GC_G1_G1COLLECTEDHEAP_HPP
  26 #define SHARE_GC_G1_G1COLLECTEDHEAP_HPP
  27 
  28 #include "gc/g1/g1BarrierSet.hpp"
  29 #include "gc/g1/g1BiasedArray.hpp"
  30 #include "gc/g1/g1CardTable.hpp"
  31 #include "gc/g1/g1CollectionSet.hpp"
  32 #include "gc/g1/g1CollectorState.hpp"
  33 #include "gc/g1/g1ConcurrentMark.hpp"
  34 #include "gc/g1/g1EdenRegions.hpp"
  35 #include "gc/g1/g1EvacFailure.hpp"
  36 #include "gc/g1/g1EvacStats.hpp"
  37 #include "gc/g1/g1EvacuationInfo.hpp"
  38 #include "gc/g1/g1GCPhaseTimes.hpp"
  39 #include "gc/g1/g1HeapTransition.hpp"
  40 #include "gc/g1/g1HeapVerifier.hpp"
  41 #include "gc/g1/g1HRPrinter.hpp"
  42 #include "gc/g1/g1HeapRegionAttr.hpp"
  43 #include "gc/g1/g1MemoryNodeManager.hpp"
  44 #include "gc/g1/g1MonitoringSupport.hpp"
  45 #include "gc/g1/g1RedirtyCardsQueue.hpp"
  46 #include "gc/g1/g1SurvivorRegions.hpp"
  47 #include "gc/g1/g1YCTypes.hpp"
  48 #include "gc/g1/heapRegionManager.hpp"
  49 #include "gc/g1/heapRegionSet.hpp"
  50 #include "gc/g1/heterogeneousHeapRegionManager.hpp"
  51 #include "gc/shared/barrierSet.hpp"
  52 #include "gc/shared/collectedHeap.hpp"
  53 #include "gc/shared/gcHeapSummary.hpp"
  54 #include "gc/shared/plab.hpp"
  55 #include "gc/shared/preservedMarks.hpp"
  56 #include "gc/shared/softRefPolicy.hpp"
  57 #include "memory/memRegion.hpp"
  58 #include "utilities/stack.hpp"
  59 
  60 // A "G1CollectedHeap" is an implementation of a Java heap for HotSpot.
  61 // It uses the "Garbage First" heap organization and algorithm, which
  62 // may combine concurrent marking with parallel, incremental compaction of
  63 // heap subsets that will yield large amounts of garbage.


 176 
 177   // Tears down the region sets / lists so that they are empty and the
 178   // regions on the heap do not belong to a region set / list. The
 179   // only exception is the humongous set, which we leave unaltered. If
 180   // free_list_only is true, it will only tear down the master free
 181   // list. It is called before a Full GC (free_list_only == false) or
 182   // before heap shrinking (free_list_only == true).
 183   void tear_down_region_sets(bool free_list_only);
 184 
 185   // Rebuilds the region sets / lists so that they are repopulated to
 186   // reflect the contents of the heap. The only exception is the
 187   // humongous set, which was not torn down in the first place. If
 188   // free_list_only is true, it will only rebuild the master free
 189   // list. It is called after a Full GC (free_list_only == false) or
 190   // after heap shrinking (free_list_only == true).
 191   void rebuild_region_sets(bool free_list_only);
 192 
 193   // Callback for region mapping changed events.
 194   G1RegionMappingChangedListener _listener;
 195 
 196   // Manages single- or multi-node memory.
 197   G1MemoryNodeManager* _mem_node_mgr;
 198 
 199   // The sequence of all heap regions in the heap.
 200   HeapRegionManager* _hrm;
 201 
 202   // Manages all allocations within regions except for humongous object allocations.
 203   G1Allocator* _allocator;
 204 
 205   // Manages all heap verification.
 206   G1HeapVerifier* _verifier;
 207 
 208   // Outside of GC pauses, the number of bytes used in all regions other
 209   // than the current allocation region(s).
 210   volatile size_t _summary_bytes_used;
 211 
 212   void increase_used(size_t bytes);
 213   void decrease_used(size_t bytes);
 214 
 215   void set_used(size_t bytes);
 216 
 217   // Class that handles archive allocation ranges.
 218   G1ArchiveAllocator* _archive_allocator;


 375 
 376   // The young region lists (eden and survivor).
 377   G1EdenRegions _eden;
 378   G1SurvivorRegions _survivor;
 379 
 380   STWGCTimer* _gc_timer_stw;
 381 
 382   G1NewTracer* _gc_tracer_stw;
 383 
 384   // The current policy object for the collector.
 385   G1Policy* _policy;
 386   G1HeapSizingPolicy* _heap_sizing_policy;
 387 
 388   G1CollectionSet _collection_set;
 389 
 390   // Try to allocate a single non-humongous HeapRegion sufficient for
 391   // an allocation of the given word_size. If do_expand is true,
 392   // attempt to expand the heap if necessary to satisfy the allocation
 393   // request. 'type' specifies the type of region to allocate. (Use constants
 394   // Old, Eden, Humongous, Survivor defined in HeapRegionType.)
 395   HeapRegion* new_region(size_t word_size,
 396                          HeapRegionType type,
 397                          bool do_expand,
 398                          uint node_index = G1MemoryNodeManager::AnyNodeIndex);
 399 
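New in this patch: new_region() takes a node_index so callers can request a
region on a preferred memory (NUMA) node, with G1MemoryNodeManager::AnyNodeIndex
preserving the old behavior. A hedged, self-contained sketch of that
prefer-then-fall-back pattern; the per-node free counts and the AnyNodeIndex
value below are illustrative stand-ins, not the real manager API:

    #include <cstddef>

    // Illustrative stand-ins for the node-preference pattern.
    const unsigned kNumNodes     = 2;
    const unsigned kAnyNodeIndex = ~0u;        // mirrors AnyNodeIndex above
    static int free_regions[kNumNodes] = {0, 3};

    // Try the preferred NUMA node first; on failure fall back to any
    // node, which is what passing AnyNodeIndex requests.
    bool claim_region_sketch(unsigned node_index) {
      if (node_index != kAnyNodeIndex && free_regions[node_index] > 0) {
        free_regions[node_index]--;
        return true;
      }
      for (unsigned i = 0; i < kNumNodes; i++) {
        if (free_regions[i] > 0) { free_regions[i]--; return true; }
      }
      return false;
    }
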
 400   // Initialize a contiguous set of free regions of length num_regions
 401   // starting at index first so that they appear as a single
 402   // humongous region.
 403   HeapWord* humongous_obj_allocate_initialize_regions(uint first,
 404                                                       uint num_regions,
 405                                                       size_t word_size);
 406 
 407   // Attempt to allocate a humongous object of the given size. Return
 408   // NULL if unsuccessful.
 409   HeapWord* humongous_obj_allocate(size_t word_size);
 410 
 411   // The following two methods, allocate_new_tlab() and
 412   // mem_allocate(), are the two main entry points from the runtime
 413   // into G1's allocation routines. They make the following
 414   // assumptions:
 415   //
 416   // * They should both be called outside safepoints.
 417   //
 418   // * They should both be called without holding the Heap_lock.


 453 
 454   // Second-level mutator allocation attempt: take the Heap_lock and
 455   // retry the allocation attempt, potentially scheduling a GC
 456   // pause. This should only be used for non-humongous allocations.
 457   HeapWord* attempt_allocation_slow(size_t word_size);
 458 
 459   // Takes the Heap_lock and attempts a humongous allocation. It can
 460   // potentially schedule a GC pause.
 461   HeapWord* attempt_allocation_humongous(size_t word_size);
 462 
 463   // Allocation attempt that should be called during safepoints (e.g.,
 464   // at the end of a successful GC). expect_null_mutator_alloc_region
 465   // specifies whether the mutator alloc region is expected to be NULL
 466   // or not.
 467   HeapWord* attempt_allocation_at_safepoint(size_t word_size,
 468                                             bool expect_null_mutator_alloc_region);
 469 
 470   // These methods are the "callbacks" from the G1AllocRegion class.
 471 
 472   // For mutator alloc regions.
 473   HeapRegion* new_mutator_alloc_region(size_t word_size, bool force, uint node_index);
 474   void retire_mutator_alloc_region(HeapRegion* alloc_region,
 475                                    size_t allocated_bytes);
 476 
 477   // For GC alloc regions.
 478   bool has_more_regions(G1HeapRegionAttr dest);
 479   HeapRegion* new_gc_alloc_region(size_t word_size, G1HeapRegionAttr dest);
 480   void retire_gc_alloc_region(HeapRegion* alloc_region,
 481                               size_t allocated_bytes, G1HeapRegionAttr dest);
 482 
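The new_*/retire_* pairs above are the callbacks invoked from G1AllocRegion
when the current allocation region is exhausted: retire the full region,
recording its allocated bytes, then request a fresh one. A minimal model of
that protocol; all types and sizes here are hypothetical:

    #include <cstddef>

    // Hypothetical region with a small fixed capacity, in bytes.
    struct RegionModel { size_t used = 0; static const size_t capacity = 4096; };

    struct AllocRegionModel {
      RegionModel* current = nullptr;
      size_t retired_bytes = 0;

      RegionModel* new_region() { return new RegionModel(); }   // ~new_*_alloc_region
      void retire(RegionModel* r, size_t allocated) {           // ~retire_*_alloc_region
        retired_bytes += allocated;
        delete r;
      }

      void* allocate(size_t bytes) {
        if (current == nullptr || current->used + bytes > RegionModel::capacity) {
          if (current != nullptr) retire(current, current->used);
          current = new_region();
        }
        current->used += bytes;
        return current;  // placeholder for the real allocated address
      }
    };
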
 483   // - if explicit_gc is true, the GC is for a System.gc() or similar,
 484   //   otherwise it's for a failed allocation.
 485   // - if clear_all_soft_refs is true, all soft references should be
 486   //   cleared during the GC.
 487   // - it returns false if it is unable to do the collection due to the
 488   //   GC locker being active, true otherwise.
 489   bool do_full_collection(bool explicit_gc,
 490                           bool clear_all_soft_refs);
 491 
 492   // Callback from VM_G1CollectFull operation, or collect_as_vm_thread.
 493   virtual void do_full_collection(bool clear_all_soft_refs);


 538 
 539   // Runs the given AbstractGangTask with the current active workers, returning the
 540   // total time taken.
 541   Tickspan run_task(AbstractGangTask* task);
 542 
 543   G1Allocator* allocator() {
 544     return _allocator;
 545   }
 546 
 547   G1HeapVerifier* verifier() {
 548     return _verifier;
 549   }
 550 
 551   G1MonitoringSupport* g1mm() {
 552     assert(_g1mm != NULL, "should have been initialized");
 553     return _g1mm;
 554   }
 555 
 556   void resize_heap_if_necessary();
 557 
 558   G1MemoryNodeManager* mem_node_mgr() const { return _mem_node_mgr; }
 559 
 560   // Expand the garbage-first heap by at least the given size (in bytes!).
 561   // Returns true if the heap was expanded by the requested amount;
 562   // false otherwise.
 563   // (Rounds up to a HeapRegion boundary.)
 564   bool expand(size_t expand_bytes, uint node_index, WorkGang* pretouch_workers = NULL, double* expand_time_ms = NULL);
 565 
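expand(), now also node-aware through its node_index parameter, rounds the
request up to a whole number of regions. A tiny sketch of that rounding, using
an illustrative 4M region size rather than the real HeapRegion constant:

    #include <cstddef>

    const size_t kRegionSizeBytes = 4 * 1024 * 1024;  // illustrative value

    // Round an expansion request up to a HeapRegion boundary, as the
    // comment above describes.
    size_t align_up_to_region_sketch(size_t expand_bytes) {
      return ((expand_bytes + kRegionSizeBytes - 1) / kRegionSizeBytes) * kRegionSizeBytes;
    }
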
 566   // Returns the PLAB statistics for a given destination.
 567   inline G1EvacStats* alloc_buffer_stats(G1HeapRegionAttr dest);
 568 
 569   // Determines PLAB size for a given destination.
 570   inline size_t desired_plab_sz(G1HeapRegionAttr dest);
 571 
 572   // Do work common to all GCs.
 573   void gc_prologue(bool full);
 574   void gc_epilogue(bool full);
 575 
 576   // Does the given region fulfill the remembered-set-based eager reclaim candidate requirements?
 577   bool is_potential_eager_reclaim_candidate(HeapRegion* r) const;
 578 
 579   // Modify the reclaim candidate set and test for presence.
 580   // These are only valid for starts_humongous regions.
 581   inline void set_humongous_reclaim_candidate(uint region, bool value);
 582   inline bool is_humongous_reclaim_candidate(uint region);
 583 
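set_humongous_reclaim_candidate() and is_humongous_reclaim_candidate() maintain
a per-region flag that is only meaningful for starts_humongous regions. A
plain-bitset model of that pair; the growable vector is an illustrative
stand-in for the real per-region table:

    #include <vector>

    // Per-region candidate flags, indexed by region number. Only entries
    // for starts_humongous regions are meaningful, matching the comment.
    static std::vector<bool> reclaim_candidate;

    void set_humongous_reclaim_candidate_sketch(unsigned region, bool value) {
      if (region >= reclaim_candidate.size()) {
        reclaim_candidate.resize(region + 1, false);
      }
      reclaim_candidate[region] = value;
    }

    bool is_humongous_reclaim_candidate_sketch(unsigned region) {
      return region < reclaim_candidate.size() && reclaim_candidate[region];
    }
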
 584   // Remove from the reclaim candidate set.  Also remove from the


 920   // Supplying a value for the _is_alive_non_header field is
 921   // optional but doing so prevents unnecessary additions to
 922   // the discovered lists during reference discovery.
 923   G1STWIsAliveClosure _is_alive_closure_stw;
 924 
 925   G1STWSubjectToDiscoveryClosure _is_subject_to_discovery_stw;
 926 
 927   // The (concurrent marking) reference processor...
 928   ReferenceProcessor* _ref_processor_cm;
 929 
 930   // Instance of the concurrent mark is_alive closure for embedding
 931   // into the Concurrent Marking reference processor as the
 932   // _is_alive_non_header field. Supplying a value for the
 933   // _is_alive_non_header field is optional but doing so prevents
 934   // unnecessary additions to the discovered lists during reference
 935   // discovery.
 936   G1CMIsAliveClosure _is_alive_closure_cm;
 937 
 938   G1CMSubjectToDiscoveryClosure _is_subject_to_discovery_cm;
 939 public:

 940   RefToScanQueue *task_queue(uint i) const;
 941 
 942   uint num_task_queues() const;
 943 
 944   // Create a G1CollectedHeap.
 945   // Must call the initialize method afterwards.
 946   // May not return if something goes wrong.
 947   G1CollectedHeap();
 948 
 949 private:
 950   jint initialize_concurrent_refinement();
 951   jint initialize_young_gen_sampling_thread();
 952 public:
 953   // Initialize the G1CollectedHeap to have the initial and
 954   // maximum sizes and remembered and barrier sets
 955   // specified by the policy object.
 956   jint initialize();
 957 
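initialize() and the two private helpers above each return a JNI-style status,
so start-up can bail out at the first failing stage. A sketch of that
early-out chaining; the status values follow the JNI convention of 0 for
success, and the stage bodies are stand-ins:

    // JNI-style status codes: 0 is success (JNI_OK), negative is failure.
    typedef int status_t;
    const status_t kOk = 0;

    status_t initialize_concurrent_refinement_sketch()     { return kOk; }
    status_t initialize_young_gen_sampling_thread_sketch() { return kOk; }

    status_t initialize_sketch() {
      status_t ecode = initialize_concurrent_refinement_sketch();
      if (ecode != kOk) return ecode;          // bail out at the first failure
      ecode = initialize_young_gen_sampling_thread_sketch();
      if (ecode != kOk) return ecode;
      return kOk;                              // heap fully initialized
    }
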
 958   virtual void stop();
 959   virtual void safepoint_synchronize_begin();

