JDK-8236073 G1: Use SoftMaxHeapSize to guide GC heuristics
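SoftMaxHeapSize is a manageable JVM flag: a soft limit on the committed heap that the collector should strive to stay under, while remaining free to grow up to -Xmx when the application's live set demands it. ZGC already honors it (JDK-8222145); this change proposes the same for G1, threading the soft limit through G1's heap sizing heuristics. Concretely, in the new version of g1CollectedHeap.hpp below, resize_heap_if_necessary() becomes resize_heap_after_full_gc(), young collections gain a combined resize (expand or shrink) step plus a shrink opportunity after concurrent marking, a heap_sizing_policy() accessor is exposed, and a soft_max_capacity() query is added. As a rough illustration only (a sketch, not the patch's actual algorithm), a soft-max-aware sizing heuristic clamps its desired capacity along these lines:

  #include <algorithm>
  #include <cstddef>

  // Sketch: choose a heap capacity target that respects the soft maximum
  // when the live data still fits under it, and the hard maximum always.
  size_t choose_capacity(size_t desired, size_t used,
                         size_t min_heap, size_t soft_max, size_t hard_max) {
    size_t target = std::max(desired, min_heap);
    if (target > soft_max && used <= soft_max) {
      target = soft_max;                    // honor -XX:SoftMaxHeapSize
    }
    return std::min(std::max(target, used), hard_max);  // never past -Xmx
  }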

Old version of src/hotspot/share/gc/g1/g1CollectedHeap.hpp, before the change:

  59 
  60 // A "G1CollectedHeap" is an implementation of a java heap for HotSpot.
  61 // It uses the "Garbage First" heap organization and algorithm, which
  62 // may combine concurrent marking with parallel, incremental compaction of
  63 // heap subsets that will yield large amounts of garbage.
  64 
  65 // Forward declarations
  66 class HeapRegion;
  67 class GenerationSpec;
  68 class G1ParScanThreadState;
  69 class G1ParScanThreadStateSet;
  70 class G1ParScanThreadState;
  71 class MemoryPool;
  72 class MemoryManager;
  73 class ObjectClosure;
  74 class SpaceClosure;
  75 class CompactibleSpaceClosure;
  76 class Space;
  77 class G1CardTableEntryClosure;
  78 class G1CollectionSet;
  79 class G1Policy;
  80 class G1HotCardCache;
  81 class G1RemSet;
  82 class G1YoungRemSetSamplingThread;
  83 class G1ConcurrentMark;
  84 class G1ConcurrentMarkThread;
  85 class G1ConcurrentRefine;
  86 class GenerationCounters;
  87 class STWGCTimer;
  88 class G1NewTracer;
  89 class EvacuationFailedInfo;
  90 class nmethod;
  91 class WorkGang;
  92 class G1Allocator;
  93 class G1ArchiveAllocator;
  94 class G1FullGCScope;
  95 class G1HeapVerifier;
  96 class G1HeapSizingPolicy;
  97 class G1HeapSummary;
  98 class G1EvacSummary;


 548 
 549   WorkGang* workers() const { return _workers; }
 550 
 551   // Runs the given AbstractGangTask with the current active workers, returning the
 552   // total time taken.
 553   Tickspan run_task(AbstractGangTask* task);
 554 
 555   G1Allocator* allocator() {
 556     return _allocator;
 557   }
 558 
 559   G1HeapVerifier* verifier() {
 560     return _verifier;
 561   }
 562 
 563   G1MonitoringSupport* g1mm() {
 564     assert(_g1mm != NULL, "should have been initialized");
 565     return _g1mm;
 566   }
 567 
 568   void resize_heap_if_necessary();
 569 
 570   G1NUMA* numa() const { return _numa; }
 571 
 572   // Expand the garbage-first heap by at least the given size (in bytes!).
 573   // Returns true if the heap was expanded by the requested amount;
 574   // false otherwise.
 575   // (Rounds up to a HeapRegion boundary.)
 576   bool expand(size_t expand_bytes, WorkGang* pretouch_workers = NULL, double* expand_time_ms = NULL);
 577   bool expand_single_region(uint node_index);
 578 
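
The comment above fixes the contract precisely: the request is in bytes, it is rounded up to whole HeapRegions, and the boolean reports whether the full rounded request was committed. A self-contained toy model of that contract (kRegionBytes is a made-up stand-in for HeapRegion::GrainBytes):

  #include <cstddef>

  const size_t kRegionBytes = 4u * 1024 * 1024;  // stand-in for HeapRegion::GrainBytes

  struct ToyHeap {
    size_t committed_regions;
    size_t max_regions;

    // Expand by at least expand_bytes, rounded up to a region boundary.
    // Returns true only if the full rounded request could be committed.
    bool expand(size_t expand_bytes) {
      size_t regions = (expand_bytes + kRegionBytes - 1) / kRegionBytes;
      size_t available = max_regions - committed_regions;
      if (regions > available) {
        committed_regions += available;  // partial expansion still happens
        return false;
      }
      committed_regions += regions;
      return true;
    }
  };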
 579   // Returns the PLAB statistics for a given destination.
 580   inline G1EvacStats* alloc_buffer_stats(G1HeapRegionAttr dest);
 581 
 582   // Determines PLAB size for a given destination.
 583   inline size_t desired_plab_sz(G1HeapRegionAttr dest);
 584 
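
These two hooks exist because G1 sizes its promotion-local allocation buffers (PLABs) adaptively, and separately per destination (young vs. old generation). The actual policy lives in G1EvacStats/PLABStats, which this excerpt does not show; the snippet below is only a hypothetical flavor of such adaptive sizing, deriving the next PLAB size from the volume allocated in the previous cycle:

  #include <cstddef>

  // Hypothetical adaptive PLAB sizing: aim for a few PLAB refills per worker
  // per GC, based on the words allocated in the previous cycle. All names
  // and the refill constant are assumptions, not HotSpot's actual policy.
  size_t desired_plab_words(size_t allocated_words_last_gc, unsigned workers,
                            size_t min_words, size_t max_words) {
    const size_t refills_per_worker = 4;           // assumed tuning constant
    if (workers == 0) workers = 1;
    size_t per_worker = allocated_words_last_gc / (workers * refills_per_worker);
    if (per_worker < min_words) return min_words;  // clamp to sane bounds
    if (per_worker > max_words) return max_words;
    return per_worker;
  }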
 585   // Do anything common to GC's.
 586   void gc_prologue(bool full);
 587   void gc_epilogue(bool full);
 588 


 758   void wait_for_root_region_scanning();
 759 
 760   // The guts of the incremental collection pause, executed by the vm
 761   // thread. It returns false if it is unable to do the collection due
 762   // to the GC locker being active, true otherwise
 763   bool do_collection_pause_at_safepoint(double target_pause_time_ms);
 764 
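
The return value encodes one specific failure mode: while the GC locker is held (a thread is inside a JNI critical region), the pause cannot run. Call sites in the VM react by stalling or retrying; the toy below merely dramatizes that documented contract with hypothetical helpers, not real HotSpot call sites:

  // Toy contract demo: the pause fails while the GC locker is active and
  // the caller retries. Both members are stand-ins, not HotSpot code.
  struct G1LikeHeap {
    bool gc_locker_active = false;
    bool do_collection_pause_at_safepoint(double /*target_pause_ms*/) {
      return !gc_locker_active;  // fails while a JNI critical region is held
    }
    void stall_until_gc_locker_clear() { gc_locker_active = false; }
  };

  bool collect_with_retry(G1LikeHeap& heap, double target_pause_ms, int max_attempts) {
    for (int i = 0; i < max_attempts; ++i) {
      if (heap.do_collection_pause_at_safepoint(target_pause_ms)) {
        return true;             // the pause ran
      }
      heap.stall_until_gc_locker_clear();
    }
    return false;
  }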
 765   G1HeapVerifier::G1VerifyType young_collection_verify_type() const;
 766   void verify_before_young_collection(G1HeapVerifier::G1VerifyType type);
 767   void verify_after_young_collection(G1HeapVerifier::G1VerifyType type);
 768 
 769   void calculate_collection_set(G1EvacuationInfo& evacuation_info, double target_pause_time_ms);
 770 
 771   // Actually do the work of evacuating the parts of the collection set.
 772   void evacuate_initial_collection_set(G1ParScanThreadStateSet* per_thread_states);
 773   void evacuate_optional_collection_set(G1ParScanThreadStateSet* per_thread_states);
 774 private:
 775   // Evacuate the next set of optional regions.
 776   void evacuate_next_optional_regions(G1ParScanThreadStateSet* per_thread_states);
 777 
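
The initial/optional split reflects G1's pause-time budgeting: mandatory collection-set regions are always evacuated, then optional regions are taken only while the pause-time budget holds out. A stand-alone sketch of that control flow (timing and batching heavily simplified):

  #include <chrono>
  #include <cstddef>
  #include <vector>

  struct Region {};  // placeholder for a heap region

  // Evacuate optional regions only while the pause budget has time left.
  size_t evacuate_optional(const std::vector<Region>& optional_regions,
                           double target_pause_ms,
                           std::chrono::steady_clock::time_point pause_start) {
    using namespace std::chrono;
    size_t evacuated = 0;
    while (evacuated < optional_regions.size()) {
      double elapsed_ms =
          duration<double, std::milli>(steady_clock::now() - pause_start).count();
      if (elapsed_ms >= target_pause_ms) {
        break;                   // budget spent; leave the rest for later
      }
      // ... evacuate optional_regions[evacuated] here ...
      ++evacuated;
    }
    return evacuated;
  }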
 778 public:
 779   void pre_evacuate_collection_set(G1EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* pss);
 780   void post_evacuate_collection_set(G1EvacuationInfo& evacuation_info,
 781                                     G1RedirtyCardsQueueSet* rdcqs,
 782                                     G1ParScanThreadStateSet* pss);
 783 
 784   void expand_heap_after_young_collection();
 785   // Update object copying statistics.
 786   void record_obj_copy_mem_stats();
 787 
 788   // The hot card cache for remembered set insertion optimization.
 789   G1HotCardCache* _hot_card_cache;
 790 
 791   // The g1 remembered set of the heap.
 792   G1RemSet* _rem_set;
 793 
 794   // After a collection pause, convert the regions in the collection set into free
 795   // regions.
 796   void free_collection_set(G1CollectionSet* collection_set, G1EvacuationInfo& evacuation_info, const size_t* surviving_young_words);
 797 
 798   // Abandon the current collection set without recording policy
 799   // statistics or updating free lists.
 800   void abandon_collection_set(G1CollectionSet* collection_set);
 801 
 802   // The concurrent marker (and the thread it runs in.)
 803   G1ConcurrentMark* _cm;
 804   G1ConcurrentMarkThread* _cm_thread;


 966   virtual void safepoint_synchronize_begin();
 967   virtual void safepoint_synchronize_end();
 968 
 969   // Does operations required after initialization has been done.
 970   void post_initialize();
 971 
 972   // Initialize weak reference processing.
 973   void ref_processing_init();
 974 
 975   virtual Name kind() const {
 976     return CollectedHeap::G1;
 977   }
 978 
 979   virtual const char* name() const {
 980     return "G1";
 981   }
 982 
 983   const G1CollectorState* collector_state() const { return &_collector_state; }
 984   G1CollectorState* collector_state() { return &_collector_state; }
 985 
 986   // The current policy object for the collector.
 987   G1Policy* policy() const { return _policy; }
 988   // The remembered set.
 989   G1RemSet* rem_set() const { return _rem_set; }
 990 
 991   inline G1GCPhaseTimes* phase_times() const;
 992 
 993   HeapRegionManager* hrm() const { return _hrm; }
 994 
 995   const G1CollectionSet* collection_set() const { return &_collection_set; }
 996   G1CollectionSet* collection_set() { return &_collection_set; }
 997 
 998   virtual SoftRefPolicy* soft_ref_policy();
 999 
1000   virtual void initialize_serviceability();
1001   virtual MemoryUsage memory_usage();
1002   virtual GrowableArray<GCMemoryManager*> memory_managers();
1003   virtual GrowableArray<MemoryPool*> memory_pools();
1004 
1005   // Try to minimize the remembered set.


1061   uint num_free_or_available_regions() const { return num_free_regions() + _hrm->available(); }
1062 
1063   MemoryUsage get_auxiliary_data_memory_usage() const {
1064     return _hrm->get_auxiliary_data_memory_usage();
1065   }
1066 
1067   // The number of regions that are not completely free.
1068   uint num_used_regions() const { return num_regions() - num_free_regions(); }
1069 
1070 #ifdef ASSERT
1071   bool is_on_master_free_list(HeapRegion* hr) {
1072     return _hrm->is_free(hr);
1073   }
1074 #endif // ASSERT
1075 
1076   inline void old_set_add(HeapRegion* hr);
1077   inline void old_set_remove(HeapRegion* hr);
1078 
1079   inline void archive_set_add(HeapRegion* hr);
1080 
1081   size_t non_young_capacity_bytes() {
1082     return (old_regions_count() + _archive_set.length() + humongous_regions_count()) * HeapRegion::GrainBytes;
1083   }
1084 
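
non_young_capacity_bytes() is plain region arithmetic: each old, archive, and humongous region contributes exactly one region (HeapRegion::GrainBytes) of capacity. For instance, with 4 MB regions, 10 old + 2 archive + 3 humongous regions yield (10 + 2 + 3) * 4 MB = 60 MB. The same accounting, stand-alone:

  #include <cstddef>

  // Mirrors the accounting above: whole regions times the region size.
  size_t non_young_capacity_bytes(size_t old_regions, size_t archive_regions,
                                  size_t humongous_regions, size_t region_bytes) {
    return (old_regions + archive_regions + humongous_regions) * region_bytes;
  }
  // non_young_capacity_bytes(10, 2, 3, 4u * 1024 * 1024) == 60 MB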
1085   // Determine whether the given region is one that we are using as an
1086   // old GC alloc region.
1087   bool is_old_gc_alloc_region(HeapRegion* hr);
1088 
1089   // Perform a collection of the heap; intended for use in implementing
1090   // "System.gc".  This probably implies as full a collection as the
1091   // "CollectedHeap" supports.
1092   virtual void collect(GCCause::Cause cause);
1093 
1094   // Perform a collection of the heap with the given cause.
1095   // Returns whether this collection actually executed.
1096   bool try_collect(GCCause::Cause cause);
1097 
1098   // True iff an evacuation has failed in the most-recent collection.
1099   bool evacuation_failed() { return _evacuation_failed; }
1100 
1101   void remove_from_old_sets(const uint old_regions_removed, const uint humongous_regions_removed);


1262     // ensure that we don't try to allocate a TLAB as
1263     // humongous and that we don't allocate a humongous
1264     // object in a TLAB.
1265     return word_size > _humongous_object_threshold_in_words;
1266   }
1267 
1268   // Returns the humongous threshold for a specific region size
1269   static size_t humongous_threshold_for(size_t region_size) {
1270     return (region_size / 2);
1271   }
1272 
1273   // Returns the number of regions the humongous object of the given word size
1274   // requires.
1275   static size_t humongous_obj_size_in_regions(size_t word_size);
1276 
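
Together these helpers pin down G1's humongous-object policy: an object is humongous when it is larger than half a region, and it then occupies a whole number of regions, rounded up. Both rules rendered stand-alone (sizes in words, as in the declarations above):

  #include <cstddef>

  // Humongous when larger than half a region.
  bool is_humongous(size_t word_size, size_t region_words) {
    return word_size > region_words / 2;
  }

  // Whole regions occupied by a humongous object (ceiling division).
  size_t humongous_obj_size_in_regions(size_t word_size, size_t region_words) {
    return (word_size + region_words - 1) / region_words;
  }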
1277   // Print the maximum heap capacity.
1278   virtual size_t max_capacity() const;
1279 
1280   // Return the size of reserved memory. Returns different value than max_capacity() when AllocateOldGenAt is used.
1281   virtual size_t max_reserved_capacity() const;
1282 
1283   virtual jlong millis_since_last_gc();
1284 
1285 
1286   // Convenience function to be used in situations where the heap type can be
1287   // asserted to be this type.
1288   static G1CollectedHeap* heap();
1289 
1290   void set_region_short_lived_locked(HeapRegion* hr);
1291   // add appropriate methods for any other surv rate groups
1292 
1293   const G1SurvivorRegions* survivor() const { return &_survivor; }
1294 
1295   uint eden_regions_count() const { return _eden.length(); }
1296   uint eden_regions_count(uint node_index) const { return _eden.regions_on_node(node_index); }
1297   uint survivor_regions_count() const { return _survivor.length(); }
1298   uint survivor_regions_count(uint node_index) const { return _survivor.regions_on_node(node_index); }
1299   size_t eden_regions_used_bytes() const { return _eden.used_bytes(); }
1300   size_t survivor_regions_used_bytes() const { return _survivor.used_bytes(); }
1301   uint young_regions_count() const { return _eden.length() + _survivor.length(); }


New version of src/hotspot/share/gc/g1/g1CollectedHeap.hpp, with the SoftMaxHeapSize changes applied:

  59 
  60 // A "G1CollectedHeap" is an implementation of a java heap for HotSpot.
  61 // It uses the "Garbage First" heap organization and algorithm, which
  62 // may combine concurrent marking with parallel, incremental compaction of
  63 // heap subsets that will yield large amounts of garbage.
  64 
  65 // Forward declarations
  66 class HeapRegion;
  67 class GenerationSpec;
  68 class G1ParScanThreadState;
  69 class G1ParScanThreadStateSet;
  70 class G1ParScanThreadState;
  71 class MemoryPool;
  72 class MemoryManager;
  73 class ObjectClosure;
  74 class SpaceClosure;
  75 class CompactibleSpaceClosure;
  76 class Space;
  77 class G1CardTableEntryClosure;
  78 class G1CollectionSet;
  79 class G1HeapSizingPolicy;
  80 class G1Policy;
  81 class G1HotCardCache;
  82 class G1RemSet;
  83 class G1YoungRemSetSamplingThread;
  84 class G1ConcurrentMark;
  85 class G1ConcurrentMarkThread;
  86 class G1ConcurrentRefine;
  87 class GenerationCounters;
  88 class STWGCTimer;
  89 class G1NewTracer;
  90 class EvacuationFailedInfo;
  91 class nmethod;
  92 class WorkGang;
  93 class G1Allocator;
  94 class G1ArchiveAllocator;
  95 class G1FullGCScope;
  96 class G1HeapVerifier;
  97 class G1HeapSizingPolicy;
  98 class G1HeapSummary;
  99 class G1EvacSummary;


 549 
 550   WorkGang* workers() const { return _workers; }
 551 
 552   // Runs the given AbstractGangTask with the current active workers, returning the
 553   // total time taken.
 554   Tickspan run_task(AbstractGangTask* task);
 555 
 556   G1Allocator* allocator() {
 557     return _allocator;
 558   }
 559 
 560   G1HeapVerifier* verifier() {
 561     return _verifier;
 562   }
 563 
 564   G1MonitoringSupport* g1mm() {
 565     assert(_g1mm != NULL, "should have been initialized");
 566     return _g1mm;
 567   }
 568 
 569   void resize_heap_after_full_gc();
 570 
 571   G1NUMA* numa() const { return _numa; }
 572 
 573   // Expand the garbage-first heap by at least the given size (in bytes!).
 574   // Returns true if the heap was expanded by the requested amount;
 575   // false otherwise.
 576   // (Rounds up to a HeapRegion boundary.)
 577   bool expand(size_t expand_bytes, WorkGang* pretouch_workers = NULL, double* expand_time_ms = NULL);
 578   bool expand_single_region(uint node_index);
 579 
 580   // Returns the PLAB statistics for a given destination.
 581   inline G1EvacStats* alloc_buffer_stats(G1HeapRegionAttr dest);
 582 
 583   // Determines PLAB size for a given destination.
 584   inline size_t desired_plab_sz(G1HeapRegionAttr dest);
 585 
 586   // Do anything common to GC's.
 587   void gc_prologue(bool full);
 588   void gc_epilogue(bool full);
 589 
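
Note the rename in this hunk: resize_heap_if_necessary() is now resize_heap_after_full_gc(), reflecting that it runs specifically after a full collection. Mainline G1 derives its post-full-GC capacity corridor from the MinHeapFreeRatio and MaxHeapFreeRatio flags; the sketch below shows that style of calculation (ratios as fractions in [0, 1); the patch's exact formula is not visible in this excerpt):

  #include <algorithm>
  #include <cstddef>

  // Sketch: derive a [min_desired, max_desired] capacity corridor from the
  // free-ratio flags, then move the committed capacity into that corridor.
  size_t capacity_after_full_gc(size_t used, size_t capacity,
                                double min_free_ratio, double max_free_ratio,
                                size_t min_heap, size_t max_heap) {
    size_t min_desired = (size_t)(used / (1.0 - min_free_ratio));  // keep >= min free
    size_t max_desired = (size_t)(used / (1.0 - max_free_ratio));  // keep <= max free
    min_desired = std::max(min_desired, min_heap);
    max_desired = std::min(std::max(max_desired, min_desired), max_heap);
    if (capacity < min_desired) return min_desired;   // expand
    if (capacity > max_desired) return max_desired;   // shrink
    return capacity;                                  // already in the corridor
  }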


 759   void wait_for_root_region_scanning();
 760 
 761   // The guts of the incremental collection pause, executed by the vm
 762   // thread. It returns false if it is unable to do the collection due
 763   // to the GC locker being active, true otherwise
 764   bool do_collection_pause_at_safepoint(double target_pause_time_ms);
 765 
 766   G1HeapVerifier::G1VerifyType young_collection_verify_type() const;
 767   void verify_before_young_collection(G1HeapVerifier::G1VerifyType type);
 768   void verify_after_young_collection(G1HeapVerifier::G1VerifyType type);
 769 
 770   void calculate_collection_set(G1EvacuationInfo& evacuation_info, double target_pause_time_ms);
 771 
 772   // Actually do the work of evacuating the parts of the collection set.
 773   void evacuate_initial_collection_set(G1ParScanThreadStateSet* per_thread_states);
 774   void evacuate_optional_collection_set(G1ParScanThreadStateSet* per_thread_states);
 775 private:
 776   // Evacuate the next set of optional regions.
 777   void evacuate_next_optional_regions(G1ParScanThreadStateSet* per_thread_states);
 778 
 779   bool expand_heap_after_young_collection();
 780   void shrink_heap_after_young_collection();
 781 
 782 public:
 783   void pre_evacuate_collection_set(G1EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* pss);
 784   void post_evacuate_collection_set(G1EvacuationInfo& evacuation_info,
 785                                     G1RedirtyCardsQueueSet* rdcqs,
 786                                     G1ParScanThreadStateSet* pss);
 787 
 788   void resize_heap_after_young_collection();
 789   void shrink_heap_after_concurrent_mark();
 790 
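
These members are the functional heart of the change: where the old version could only expand after a young collection (expand_heap_after_young_collection(), line 784 before), the new version pairs expansion with shrinking and adds resize_heap_after_young_collection(), plus a shrink opportunity once concurrent marking completes. The decision logic itself lives in G1HeapSizingPolicy, which this excerpt does not include; the following is only a hypothetical illustration of the shape of a soft-max-aware resize decision:

  #include <cstddef>

  enum class Resize { kExpand, kShrink, kNone };

  // Hypothetical post-GC decision. `needed` = live data plus desired
  // headroom. Sitting above the soft max is treated as an extra reason to
  // give memory back, even before the plain slack-based shrink would fire.
  Resize decide_resize(size_t needed, size_t capacity, size_t soft_max,
                       size_t hard_max, size_t slack, size_t* amount) {
    if (capacity < needed) {
      size_t limit = needed < hard_max ? needed : hard_max;  // never past -Xmx
      *amount = limit - capacity;
      return *amount > 0 ? Resize::kExpand : Resize::kNone;
    }
    bool over_soft_max = capacity > soft_max;
    bool lots_of_slack = capacity - needed > slack;
    if (over_soft_max || lots_of_slack) {
      *amount = capacity - needed;   // shrink down to what is needed
      return Resize::kShrink;
    }
    *amount = 0;
    return Resize::kNone;
  }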
 791   // Update object copying statistics.
 792   void record_obj_copy_mem_stats();
 793 
 794   // The hot card cache for remembered set insertion optimization.
 795   G1HotCardCache* _hot_card_cache;
 796 
 797   // The g1 remembered set of the heap.
 798   G1RemSet* _rem_set;
 799 
 800   // After a collection pause, convert the regions in the collection set into free
 801   // regions.
 802   void free_collection_set(G1CollectionSet* collection_set, G1EvacuationInfo& evacuation_info, const size_t* surviving_young_words);
 803 
 804   // Abandon the current collection set without recording policy
 805   // statistics or updating free lists.
 806   void abandon_collection_set(G1CollectionSet* collection_set);
 807 
 808   // The concurrent marker (and the thread it runs in.)
 809   G1ConcurrentMark* _cm;
 810   G1ConcurrentMarkThread* _cm_thread;


 972   virtual void safepoint_synchronize_begin();
 973   virtual void safepoint_synchronize_end();
 974 
 975   // Does operations required after initialization has been done.
 976   void post_initialize();
 977 
 978   // Initialize weak reference processing.
 979   void ref_processing_init();
 980 
 981   virtual Name kind() const {
 982     return CollectedHeap::G1;
 983   }
 984 
 985   virtual const char* name() const {
 986     return "G1";
 987   }
 988 
 989   const G1CollectorState* collector_state() const { return &_collector_state; }
 990   G1CollectorState* collector_state() { return &_collector_state; }
 991 
 992   G1HeapSizingPolicy* heap_sizing_policy() const { return _heap_sizing_policy; }
 993   // The current policy object for the collector.
 994   G1Policy* policy() const { return _policy; }
 995   // The remembered set.
 996   G1RemSet* rem_set() const { return _rem_set; }
 997 
 998   inline G1GCPhaseTimes* phase_times() const;
 999 
1000   HeapRegionManager* hrm() const { return _hrm; }
1001 
1002   const G1CollectionSet* collection_set() const { return &_collection_set; }
1003   G1CollectionSet* collection_set() { return &_collection_set; }
1004 
1005   virtual SoftRefPolicy* soft_ref_policy();
1006 
1007   virtual void initialize_serviceability();
1008   virtual MemoryUsage memory_usage();
1009   virtual GrowableArray<GCMemoryManager*> memory_managers();
1010   virtual GrowableArray<MemoryPool*> memory_pools();
1011 
1012   // Try to minimize the remembered set.


1068   uint num_free_or_available_regions() const { return num_free_regions() + _hrm->available(); }
1069 
1070   MemoryUsage get_auxiliary_data_memory_usage() const {
1071     return _hrm->get_auxiliary_data_memory_usage();
1072   }
1073 
1074   // The number of regions that are not completely free.
1075   uint num_used_regions() const { return num_regions() - num_free_regions(); }
1076 
1077 #ifdef ASSERT
1078   bool is_on_master_free_list(HeapRegion* hr) {
1079     return _hrm->is_free(hr);
1080   }
1081 #endif // ASSERT
1082 
1083   inline void old_set_add(HeapRegion* hr);
1084   inline void old_set_remove(HeapRegion* hr);
1085 
1086   inline void archive_set_add(HeapRegion* hr);
1087 
1088   size_t non_young_capacity_bytes() const {
1089     return (old_regions_count() + _archive_set.length() + humongous_regions_count()) * HeapRegion::GrainBytes;
1090   }
1091 
1092   // Determine whether the given region is one that we are using as an
1093   // old GC alloc region.
1094   bool is_old_gc_alloc_region(HeapRegion* hr);
1095 
1096   // Perform a collection of the heap; intended for use in implementing
1097   // "System.gc".  This probably implies as full a collection as the
1098   // "CollectedHeap" supports.
1099   virtual void collect(GCCause::Cause cause);
1100 
1101   // Perform a collection of the heap with the given cause.
1102   // Returns whether this collection actually executed.
1103   bool try_collect(GCCause::Cause cause);
1104 
1105   // True iff an evacuation has failed in the most-recent collection.
1106   bool evacuation_failed() { return _evacuation_failed; }
1107 
1108   void remove_from_old_sets(const uint old_regions_removed, const uint humongous_regions_removed);


1269     // ensure that we don't try to allocate a TLAB as
1270     // humongous and that we don't allocate a humongous
1271     // object in a TLAB.
1272     return word_size > _humongous_object_threshold_in_words;
1273   }
1274 
1275   // Returns the humongous threshold for a specific region size
1276   static size_t humongous_threshold_for(size_t region_size) {
1277     return (region_size / 2);
1278   }
1279 
1280   // Returns the number of regions the humongous object of the given word size
1281   // requires.
1282   static size_t humongous_obj_size_in_regions(size_t word_size);
1283 
1284   // Print the maximum heap capacity.
1285   virtual size_t max_capacity() const;
1286 
1287   // Return the size of reserved memory. Returns different value than max_capacity() when AllocateOldGenAt is used.
1288   virtual size_t max_reserved_capacity() const;
1289 
1290   // Print the soft maximum heap capacity.
1291   size_t soft_max_capacity() const;
1292 
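
The excerpt now distinguishes three capacity queries: max_capacity() (the hard ceiling), max_reserved_capacity() (reserved memory, which differs from max_capacity() when AllocateOldGenAt is in use), and the new soft_max_capacity() (the current SoftMaxHeapSize, which as a manageable flag can be changed at runtime, e.g. via jinfo or jcmd VM.set_flag). A small sketch of the orderings one would expect to hold between them (the reserved >= max relationship is an assumption based on the comment above, not something the excerpt states outright):

  #include <cassert>
  #include <cstddef>

  // Assumed invariants between the three capacity queries, plus the natural
  // "headroom to the soft limit" derived quantity.
  void check_capacities(size_t used, size_t soft_max,
                        size_t max_cap, size_t max_reserved) {
    assert(soft_max <= max_cap);       // SoftMaxHeapSize is capped at -Xmx
    assert(max_cap <= max_reserved);   // assumption: reservation covers the heap
    size_t soft_headroom = used < soft_max ? soft_max - used : 0;
    (void)soft_headroom;               // e.g. feed into sizing decisions
  }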
1293   virtual jlong millis_since_last_gc();
1294 
1295 
1296   // Convenience function to be used in situations where the heap type can be
1297   // asserted to be this type.
1298   static G1CollectedHeap* heap();
1299 
1300   void set_region_short_lived_locked(HeapRegion* hr);
1301   // add appropriate methods for any other surv rate groups
1302 
1303   const G1SurvivorRegions* survivor() const { return &_survivor; }
1304 
1305   uint eden_regions_count() const { return _eden.length(); }
1306   uint eden_regions_count(uint node_index) const { return _eden.regions_on_node(node_index); }
1307   uint survivor_regions_count() const { return _survivor.length(); }
1308   uint survivor_regions_count(uint node_index) const { return _survivor.regions_on_node(node_index); }
1309   size_t eden_regions_used_bytes() const { return _eden.used_bytes(); }
1310   size_t survivor_regions_used_bytes() const { return _survivor.used_bytes(); }
1311   uint young_regions_count() const { return _eden.length() + _survivor.length(); }