
src/hotspot/share/gc/g1/g1CollectedHeap.hpp

rev 58105 : [mq]: 8236073-softmaxheapsize


  59 
  60 // A "G1CollectedHeap" is an implementation of a Java heap for HotSpot.
  61 // It uses the "Garbage First" heap organization and algorithm, which
  62 // may combine concurrent marking with parallel, incremental compaction of
  63 // heap subsets that will yield large amounts of garbage.
  64 
  65 // Forward declarations
  66 class HeapRegion;
  67 class GenerationSpec;
  68 class G1ParScanThreadState;
  69 class G1ParScanThreadStateSet;
  70 class G1ParScanThreadState;
  71 class MemoryPool;
  72 class MemoryManager;
  73 class ObjectClosure;
  74 class SpaceClosure;
  75 class CompactibleSpaceClosure;
  76 class Space;
  77 class G1CardTableEntryClosure;
  78 class G1CollectionSet;

  79 class G1Policy;
  80 class G1HotCardCache;
  81 class G1RemSet;
  82 class G1YoungRemSetSamplingThread;
  83 class G1ConcurrentMark;
  84 class G1ConcurrentMarkThread;
  85 class G1ConcurrentRefine;
  86 class GenerationCounters;
  87 class STWGCTimer;
  88 class G1NewTracer;
  89 class EvacuationFailedInfo;
  90 class nmethod;
  91 class WorkGang;
  92 class G1Allocator;
  93 class G1ArchiveAllocator;
  94 class G1FullGCScope;
  95 class G1HeapVerifier;
  96 class G1HeapSizingPolicy;
  97 class G1HeapSummary;
  98 class G1EvacSummary;


 548 
 549   WorkGang* workers() const { return _workers; }
 550 
  551   // Runs the given AbstractGangTask with the currently active workers, returning the
  552   // total time taken.
 553   Tickspan run_task(AbstractGangTask* task);
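
A minimal caller-side sketch of run_task(); SampleGangTask is a hypothetical
subclass used only for illustration (AbstractGangTask's name-taking
constructor and its work(uint) hook are the real extension points):

// Assumes the usual HotSpot headers (workgroup.hpp, ticks.hpp, log.hpp).
class SampleGangTask : public AbstractGangTask {
public:
  SampleGangTask() : AbstractGangTask("Sample Gang Task") {}
  virtual void work(uint worker_id) {
    // Per-worker work goes here, indexed by worker_id.
  }
};

void time_sample_task(G1CollectedHeap* g1h) {
  SampleGangTask task;
  Tickspan elapsed = g1h->run_task(&task);  // runs on the active workers
  log_debug(gc)("Sample task took %.3fs", elapsed.seconds());
}
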
 554 
 555   G1Allocator* allocator() {
 556     return _allocator;
 557   }
 558 
 559   G1HeapVerifier* verifier() {
 560     return _verifier;
 561   }
 562 
 563   G1MonitoringSupport* g1mm() {
 564     assert(_g1mm != NULL, "should have been initialized");
 565     return _g1mm;
 566   }
 567 
 568   void resize_heap_if_necessary();
 569 
 570   G1NUMA* numa() const { return _numa; }
 571 
 572   // Expand the garbage-first heap by at least the given size (in bytes!).
 573   // Returns true if the heap was expanded by the requested amount;
 574   // false otherwise.
 575   // (Rounds up to a HeapRegion boundary.)
 576   bool expand(size_t expand_bytes, WorkGang* pretouch_workers = NULL, double* expand_time_ms = NULL);
 577   bool expand_single_region(uint node_index);
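
Since expansion is region-granular, the requested byte count is effectively
rounded up to a whole number of regions. A sketch of that arithmetic
(align_up and HeapRegion::GrainBytes are existing HotSpot helpers; the
function itself is illustrative, not a member of this class):

size_t regions_for_expansion(size_t expand_bytes) {
  size_t aligned_bytes = align_up(expand_bytes, HeapRegion::GrainBytes);
  // e.g. with 4M regions, a 5M request becomes 8M, i.e. two regions.
  return aligned_bytes / HeapRegion::GrainBytes;
}
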
 578 
 579   // Returns the PLAB statistics for a given destination.
 580   inline G1EvacStats* alloc_buffer_stats(G1HeapRegionAttr dest);
 581 
 582   // Determines PLAB size for a given destination.
 583   inline size_t desired_plab_sz(G1HeapRegionAttr dest);
 584 
  585   // Do anything common to GCs.
 586   void gc_prologue(bool full);
 587   void gc_epilogue(bool full);
 588 


 765   // precondition: !is_gc_active()
 766   bool do_collection_pause_at_safepoint(double target_pause_time_ms);
 767 
  768   // Helper for do_collection_pause_at_safepoint, containing the guts
  769   // of the incremental collection pause, executed by the VM thread.
 770   void do_collection_pause_at_safepoint_helper(double target_pause_time_ms);
 771 
 772   G1HeapVerifier::G1VerifyType young_collection_verify_type() const;
 773   void verify_before_young_collection(G1HeapVerifier::G1VerifyType type);
 774   void verify_after_young_collection(G1HeapVerifier::G1VerifyType type);
 775 
 776   void calculate_collection_set(G1EvacuationInfo& evacuation_info, double target_pause_time_ms);
 777 
 778   // Actually do the work of evacuating the parts of the collection set.
 779   void evacuate_initial_collection_set(G1ParScanThreadStateSet* per_thread_states);
 780   void evacuate_optional_collection_set(G1ParScanThreadStateSet* per_thread_states);
 781 private:
 782   // Evacuate the next set of optional regions.
 783   void evacuate_next_optional_regions(G1ParScanThreadStateSet* per_thread_states);
 784 



 785 public:
 786   void pre_evacuate_collection_set(G1EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* pss);
 787   void post_evacuate_collection_set(G1EvacuationInfo& evacuation_info,
 788                                     G1RedirtyCardsQueueSet* rdcqs,
 789                                     G1ParScanThreadStateSet* pss);
 790 
 791   void expand_heap_after_young_collection();

 792   // Update object copying statistics.
 793   void record_obj_copy_mem_stats();
 794 
 795   // The hot card cache for remembered set insertion optimization.
 796   G1HotCardCache* _hot_card_cache;
 797 
 798   // The g1 remembered set of the heap.
 799   G1RemSet* _rem_set;
 800 
 801   // After a collection pause, convert the regions in the collection set into free
 802   // regions.
 803   void free_collection_set(G1CollectionSet* collection_set, G1EvacuationInfo& evacuation_info, const size_t* surviving_young_words);
 804 
 805   // Abandon the current collection set without recording policy
 806   // statistics or updating free lists.
 807   void abandon_collection_set(G1CollectionSet* collection_set);
 808 
  809   // The concurrent marker (and the thread it runs in).
 810   G1ConcurrentMark* _cm;
 811   G1ConcurrentMarkThread* _cm_thread;


 973   virtual void safepoint_synchronize_begin();
 974   virtual void safepoint_synchronize_end();
 975 
  976   // Performs operations required after initialization has been done.
 977   void post_initialize();
 978 
 979   // Initialize weak reference processing.
 980   void ref_processing_init();
 981 
 982   virtual Name kind() const {
 983     return CollectedHeap::G1;
 984   }
 985 
 986   virtual const char* name() const {
 987     return "G1";
 988   }
 989 
 990   const G1CollectorState* collector_state() const { return &_collector_state; }
 991   G1CollectorState* collector_state() { return &_collector_state; }
 992 

 993   // The current policy object for the collector.
 994   G1Policy* policy() const { return _policy; }
 995   // The remembered set.
 996   G1RemSet* rem_set() const { return _rem_set; }
 997 
 998   inline G1GCPhaseTimes* phase_times() const;
 999 
1000   HeapRegionManager* hrm() const { return _hrm; }
1001 
1002   const G1CollectionSet* collection_set() const { return &_collection_set; }
1003   G1CollectionSet* collection_set() { return &_collection_set; }
1004 
1005   virtual SoftRefPolicy* soft_ref_policy();
1006 
1007   virtual void initialize_serviceability();
1008   virtual MemoryUsage memory_usage();
1009   virtual GrowableArray<GCMemoryManager*> memory_managers();
1010   virtual GrowableArray<MemoryPool*> memory_pools();
1011 
1012   // Try to minimize the remembered set.


1068   uint num_free_or_available_regions() const { return num_free_regions() + _hrm->available(); }
1069 
1070   MemoryUsage get_auxiliary_data_memory_usage() const {
1071     return _hrm->get_auxiliary_data_memory_usage();
1072   }
1073 
1074   // The number of regions that are not completely free.
1075   uint num_used_regions() const { return num_regions() - num_free_regions(); }
1076 
1077 #ifdef ASSERT
1078   bool is_on_master_free_list(HeapRegion* hr) {
1079     return _hrm->is_free(hr);
1080   }
1081 #endif // ASSERT
1082 
1083   inline void old_set_add(HeapRegion* hr);
1084   inline void old_set_remove(HeapRegion* hr);
1085 
1086   inline void archive_set_add(HeapRegion* hr);
1087 
1088   size_t non_young_capacity_bytes() {
1089     return (old_regions_count() + _archive_set.length() + humongous_regions_count()) * HeapRegion::GrainBytes;
1090   }
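
A worked instance of the arithmetic above, with made-up region counts (the
4M grain is just one common configuration):

// Illustrative numbers only.
const size_t grain = 4 * M;   // stand-in for HeapRegion::GrainBytes
const uint old_r = 100, archive_r = 2, humongous_r = 10;
const size_t non_young = (old_r + archive_r + humongous_r) * grain;  // 448M
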
1091 
1092   // Determine whether the given region is one that we are using as an
1093   // old GC alloc region.
1094   bool is_old_gc_alloc_region(HeapRegion* hr);
1095 
1096   // Perform a collection of the heap; intended for use in implementing
1097   // "System.gc".  This probably implies as full a collection as the
1098   // "CollectedHeap" supports.
1099   virtual void collect(GCCause::Cause cause);
1100 
1101   // Perform a collection of the heap with the given cause.
1102   // Returns whether this collection actually executed.
1103   bool try_collect(GCCause::Cause cause);
1104 
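
A hypothetical caller-side sketch of try_collect(); the helper function and
log message are illustrative only, though GCCause::_java_lang_system_gc is a
real cause value:

void request_explicit_gc(G1CollectedHeap* g1h) {
  if (!g1h->try_collect(GCCause::_java_lang_system_gc)) {
    log_debug(gc)("Explicit GC request was not executed");
  }
}
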
1105   // True iff an evacuation has failed in the most-recent collection.
1106   bool evacuation_failed() { return _evacuation_failed; }
1107 
1108   void remove_from_old_sets(const uint old_regions_removed, const uint humongous_regions_removed);


1269     // ensure that we don't try to allocate a TLAB as
1270     // humongous and that we don't allocate a humongous
1271     // object in a TLAB.
1272     return word_size > _humongous_object_threshold_in_words;
1273   }
1274 
1275   // Returns the humongous threshold for a specific region size
1276   static size_t humongous_threshold_for(size_t region_size) {
1277     return (region_size / 2);
1278   }
1279 
 1280   // Returns the number of regions a humongous object of the given word size
 1281   // requires.
1282   static size_t humongous_obj_size_in_regions(size_t word_size);
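
An illustrative tie-up of the humongous helpers above; the ceiling-division
body below is an assumption that merely matches the comment, not the real
implementation. With a 4M (524288-word) region the threshold is 262144
words, so a 262145-word object is humongous and a 600000-word one needs two
regions:

size_t size_in_regions(size_t word_size, size_t region_words) {
  return (word_size + region_words - 1) / region_words;  // ceiling division
}
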
1283 
 1284   // Return the maximum heap capacity.
1285   virtual size_t max_capacity() const;
1286 
 1287   // Return the size of reserved memory. Returns a different value than max_capacity() when AllocateOldGenAt is used.
1288   virtual size_t max_reserved_capacity() const;



1289 
1290   virtual jlong millis_since_last_gc();
1291 
1292 
1293   // Convenience function to be used in situations where the heap type can be
1294   // asserted to be this type.
1295   static G1CollectedHeap* heap();
1296 
1297   void set_region_short_lived_locked(HeapRegion* hr);
1298   // add appropriate methods for any other surv rate groups
1299 
1300   const G1SurvivorRegions* survivor() const { return &_survivor; }
1301 
1302   uint eden_regions_count() const { return _eden.length(); }
1303   uint eden_regions_count(uint node_index) const { return _eden.regions_on_node(node_index); }
1304   uint survivor_regions_count() const { return _survivor.length(); }
1305   uint survivor_regions_count(uint node_index) const { return _survivor.regions_on_node(node_index); }
1306   size_t eden_regions_used_bytes() const { return _eden.used_bytes(); }
1307   size_t survivor_regions_used_bytes() const { return _survivor.used_bytes(); }
1308   uint young_regions_count() const { return _eden.length() + _survivor.length(); }




  59 
  60 // A "G1CollectedHeap" is an implementation of a Java heap for HotSpot.
  61 // It uses the "Garbage First" heap organization and algorithm, which
  62 // may combine concurrent marking with parallel, incremental compaction of
  63 // heap subsets that will yield large amounts of garbage.
  64 
  65 // Forward declarations
  66 class HeapRegion;
  67 class GenerationSpec;
  68 class G1ParScanThreadState;
  69 class G1ParScanThreadStateSet;
  70 class G1ParScanThreadState;
  71 class MemoryPool;
  72 class MemoryManager;
  73 class ObjectClosure;
  74 class SpaceClosure;
  75 class CompactibleSpaceClosure;
  76 class Space;
  77 class G1CardTableEntryClosure;
  78 class G1CollectionSet;
  79 class G1HeapSizingPolicy;
  80 class G1Policy;
  81 class G1HotCardCache;
  82 class G1RemSet;
  83 class G1YoungRemSetSamplingThread;
  84 class G1ConcurrentMark;
  85 class G1ConcurrentMarkThread;
  86 class G1ConcurrentRefine;
  87 class GenerationCounters;
  88 class STWGCTimer;
  89 class G1NewTracer;
  90 class EvacuationFailedInfo;
  91 class nmethod;
  92 class WorkGang;
  93 class G1Allocator;
  94 class G1ArchiveAllocator;
  95 class G1FullGCScope;
  96 class G1HeapVerifier;
  97 class G1HeapSizingPolicy;
  98 class G1HeapSummary;
  99 class G1EvacSummary;


 549 
 550   WorkGang* workers() const { return _workers; }
 551 
  552   // Runs the given AbstractGangTask with the currently active workers, returning the
  553   // total time taken.
 554   Tickspan run_task(AbstractGangTask* task);
 555 
 556   G1Allocator* allocator() {
 557     return _allocator;
 558   }
 559 
 560   G1HeapVerifier* verifier() {
 561     return _verifier;
 562   }
 563 
 564   G1MonitoringSupport* g1mm() {
 565     assert(_g1mm != NULL, "should have been initialized");
 566     return _g1mm;
 567   }
 568 
 569   void resize_heap_after_full_gc();
 570 
 571   G1NUMA* numa() const { return _numa; }
 572 
 573   // Expand the garbage-first heap by at least the given size (in bytes!).
 574   // Returns true if the heap was expanded by the requested amount;
 575   // false otherwise.
 576   // (Rounds up to a HeapRegion boundary.)
 577   bool expand(size_t expand_bytes, WorkGang* pretouch_workers = NULL, double* expand_time_ms = NULL);
 578   bool expand_single_region(uint node_index);
 579 
 580   // Returns the PLAB statistics for a given destination.
 581   inline G1EvacStats* alloc_buffer_stats(G1HeapRegionAttr dest);
 582 
 583   // Determines PLAB size for a given destination.
 584   inline size_t desired_plab_sz(G1HeapRegionAttr dest);
 585 
  586   // Do anything common to GCs.
 587   void gc_prologue(bool full);
 588   void gc_epilogue(bool full);
 589 


 766   // precondition: !is_gc_active()
 767   bool do_collection_pause_at_safepoint(double target_pause_time_ms);
 768 
  769   // Helper for do_collection_pause_at_safepoint, containing the guts
  770   // of the incremental collection pause, executed by the VM thread.
 771   void do_collection_pause_at_safepoint_helper(double target_pause_time_ms);
 772 
 773   G1HeapVerifier::G1VerifyType young_collection_verify_type() const;
 774   void verify_before_young_collection(G1HeapVerifier::G1VerifyType type);
 775   void verify_after_young_collection(G1HeapVerifier::G1VerifyType type);
 776 
 777   void calculate_collection_set(G1EvacuationInfo& evacuation_info, double target_pause_time_ms);
 778 
 779   // Actually do the work of evacuating the parts of the collection set.
 780   void evacuate_initial_collection_set(G1ParScanThreadStateSet* per_thread_states);
 781   void evacuate_optional_collection_set(G1ParScanThreadStateSet* per_thread_states);
 782 private:
 783   // Evacuate the next set of optional regions.
 784   void evacuate_next_optional_regions(G1ParScanThreadStateSet* per_thread_states);
 785 
 786   bool expand_heap_after_young_collection();
 787   void shrink_heap_after_young_collection();
 788 
 789 public:
 790   void pre_evacuate_collection_set(G1EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* pss);
 791   void post_evacuate_collection_set(G1EvacuationInfo& evacuation_info,
 792                                     G1RedirtyCardsQueueSet* rdcqs,
 793                                     G1ParScanThreadStateSet* pss);
 794 
 795   void resize_heap_after_young_collection();
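
The new expand/shrink split above suggests the following shape for this
entry point; the body is a hypothetical sketch only, since the real control
flow lives in g1CollectedHeap.cpp, which this hunk does not show:

void G1CollectedHeap::resize_heap_after_young_collection() {
  if (!expand_heap_after_young_collection()) {
    // Only consider shrinking (e.g. toward SoftMaxHeapSize) when the pause
    // did not already decide to grow the heap.
    shrink_heap_after_young_collection();
  }
}
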
 796 
 797   // Update object copying statistics.
 798   void record_obj_copy_mem_stats();
 799 
 800   // The hot card cache for remembered set insertion optimization.
 801   G1HotCardCache* _hot_card_cache;
 802 
 803   // The g1 remembered set of the heap.
 804   G1RemSet* _rem_set;
 805 
 806   // After a collection pause, convert the regions in the collection set into free
 807   // regions.
 808   void free_collection_set(G1CollectionSet* collection_set, G1EvacuationInfo& evacuation_info, const size_t* surviving_young_words);
 809 
 810   // Abandon the current collection set without recording policy
 811   // statistics or updating free lists.
 812   void abandon_collection_set(G1CollectionSet* collection_set);
 813 
  814   // The concurrent marker (and the thread it runs in).
 815   G1ConcurrentMark* _cm;
 816   G1ConcurrentMarkThread* _cm_thread;


 978   virtual void safepoint_synchronize_begin();
 979   virtual void safepoint_synchronize_end();
 980 
  981   // Performs operations required after initialization has been done.
 982   void post_initialize();
 983 
 984   // Initialize weak reference processing.
 985   void ref_processing_init();
 986 
 987   virtual Name kind() const {
 988     return CollectedHeap::G1;
 989   }
 990 
 991   virtual const char* name() const {
 992     return "G1";
 993   }
 994 
 995   const G1CollectorState* collector_state() const { return &_collector_state; }
 996   G1CollectorState* collector_state() { return &_collector_state; }
 997 
 998   G1HeapSizingPolicy* heap_sizing_policy() const { return _heap_sizing_policy; }
 999   // The current policy object for the collector.
1000   G1Policy* policy() const { return _policy; }
1001   // The remembered set.
1002   G1RemSet* rem_set() const { return _rem_set; }
1003 
1004   inline G1GCPhaseTimes* phase_times() const;
1005 
1006   HeapRegionManager* hrm() const { return _hrm; }
1007 
1008   const G1CollectionSet* collection_set() const { return &_collection_set; }
1009   G1CollectionSet* collection_set() { return &_collection_set; }
1010 
1011   virtual SoftRefPolicy* soft_ref_policy();
1012 
1013   virtual void initialize_serviceability();
1014   virtual MemoryUsage memory_usage();
1015   virtual GrowableArray<GCMemoryManager*> memory_managers();
1016   virtual GrowableArray<MemoryPool*> memory_pools();
1017 
1018   // Try to minimize the remembered set.


1074   uint num_free_or_available_regions() const { return num_free_regions() + _hrm->available(); }
1075 
1076   MemoryUsage get_auxiliary_data_memory_usage() const {
1077     return _hrm->get_auxiliary_data_memory_usage();
1078   }
1079 
1080   // The number of regions that are not completely free.
1081   uint num_used_regions() const { return num_regions() - num_free_regions(); }
1082 
1083 #ifdef ASSERT
1084   bool is_on_master_free_list(HeapRegion* hr) {
1085     return _hrm->is_free(hr);
1086   }
1087 #endif // ASSERT
1088 
1089   inline void old_set_add(HeapRegion* hr);
1090   inline void old_set_remove(HeapRegion* hr);
1091 
1092   inline void archive_set_add(HeapRegion* hr);
1093 
1094   size_t non_young_capacity_bytes() const {
1095     return (old_regions_count() + _archive_set.length() + humongous_regions_count()) * HeapRegion::GrainBytes;
1096   }
1097 
1098   // Determine whether the given region is one that we are using as an
1099   // old GC alloc region.
1100   bool is_old_gc_alloc_region(HeapRegion* hr);
1101 
1102   // Perform a collection of the heap; intended for use in implementing
1103   // "System.gc".  This probably implies as full a collection as the
1104   // "CollectedHeap" supports.
1105   virtual void collect(GCCause::Cause cause);
1106 
1107   // Perform a collection of the heap with the given cause.
1108   // Returns whether this collection actually executed.
1109   bool try_collect(GCCause::Cause cause);
1110 
1111   // True iff an evacuation has failed in the most-recent collection.
1112   bool evacuation_failed() { return _evacuation_failed; }
1113 
1114   void remove_from_old_sets(const uint old_regions_removed, const uint humongous_regions_removed);


1275     // ensure that we don't try to allocate a TLAB as
1276     // humongous and that we don't allocate a humongous
1277     // object in a TLAB.
1278     return word_size > _humongous_object_threshold_in_words;
1279   }
1280 
1281   // Returns the humongous threshold for a specific region size
1282   static size_t humongous_threshold_for(size_t region_size) {
1283     return (region_size / 2);
1284   }
1285 
 1286   // Returns the number of regions a humongous object of the given word size
 1287   // requires.
1288   static size_t humongous_obj_size_in_regions(size_t word_size);
1289 
 1290   // Return the maximum heap capacity.
1291   virtual size_t max_capacity() const;
1292 
 1293   // Return the size of reserved memory. Returns a different value than max_capacity() when AllocateOldGenAt is used.
1294   virtual size_t max_reserved_capacity() const;
1295 
 1296   // Return the soft maximum heap capacity.
1297   size_t soft_max_capacity() const;
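
JDK-8236073 wires the manageable SoftMaxHeapSize flag into G1. By analogy
with ZGC's existing soft_max_capacity(), a plausible shape is to align the
flag to the region grain and clamp it to the heap bounds; this is an assumed
sketch, not the body from this patch:

// Assumed implementation; the real one is in g1CollectedHeap.cpp,
// outside this hunk.
size_t G1CollectedHeap::soft_max_capacity() const {
  size_t aligned = align_up(SoftMaxHeapSize, HeapRegion::GrainBytes);
  return MIN2(MAX2(aligned, MinHeapSize), max_capacity());
}
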
1298 
1299   virtual jlong millis_since_last_gc();
1300 
1301 
1302   // Convenience function to be used in situations where the heap type can be
1303   // asserted to be this type.
1304   static G1CollectedHeap* heap();
1305 
1306   void set_region_short_lived_locked(HeapRegion* hr);
1307   // add appropriate methods for any other surv rate groups
1308 
1309   const G1SurvivorRegions* survivor() const { return &_survivor; }
1310 
1311   uint eden_regions_count() const { return _eden.length(); }
1312   uint eden_regions_count(uint node_index) const { return _eden.regions_on_node(node_index); }
1313   uint survivor_regions_count() const { return _survivor.length(); }
1314   uint survivor_regions_count(uint node_index) const { return _survivor.regions_on_node(node_index); }
1315   size_t eden_regions_used_bytes() const { return _eden.used_bytes(); }
1316   size_t survivor_regions_used_bytes() const { return _survivor.used_bytes(); }
1317   uint young_regions_count() const { return _eden.length() + _survivor.length(); }

