G1: Use SoftMaxHeapSize to guide GC heuristics

The excerpts below are from the G1CollectedHeap class declaration (g1CollectedHeap.hpp). The first group shows the relevant lines before the change; the second group, starting again at line 764, shows the same lines after it.

 764 
 765   G1HeapVerifier::G1VerifyType young_collection_verify_type() const;
 766   void verify_before_young_collection(G1HeapVerifier::G1VerifyType type);
 767   void verify_after_young_collection(G1HeapVerifier::G1VerifyType type);
 768 
 769   void calculate_collection_set(G1EvacuationInfo& evacuation_info, double target_pause_time_ms);
 770 
 771   // Actually do the work of evacuating the parts of the collection set.
 772   void evacuate_initial_collection_set(G1ParScanThreadStateSet* per_thread_states);
 773   void evacuate_optional_collection_set(G1ParScanThreadStateSet* per_thread_states);
 774 private:
 775   // Evacuate the next set of optional regions.
 776   void evacuate_next_optional_regions(G1ParScanThreadStateSet* per_thread_states);
 777 
 778 public:
 779   void pre_evacuate_collection_set(G1EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* pss);
 780   void post_evacuate_collection_set(G1EvacuationInfo& evacuation_info,
 781                                     G1RedirtyCardsQueueSet* rdcqs,
 782                                     G1ParScanThreadStateSet* pss);
 783 
 784   void expand_heap_after_young_collection();
 785   // Update object copying statistics.
 786   void record_obj_copy_mem_stats();
 787 
 788   // The hot card cache for remembered set insertion optimization.
 789   G1HotCardCache* _hot_card_cache;
 790 
 791   // The g1 remembered set of the heap.
 792   G1RemSet* _rem_set;
 793 
 794   // After a collection pause, convert the regions in the collection set into free
 795   // regions.
 796   void free_collection_set(G1CollectionSet* collection_set, G1EvacuationInfo& evacuation_info, const size_t* surviving_young_words);
 797 
 798   // Abandon the current collection set without recording policy
 799   // statistics or updating free lists.
 800   void abandon_collection_set(G1CollectionSet* collection_set);
 801 
 802   // The concurrent marker (and the thread it runs in.)
 803   G1ConcurrentMark* _cm;
 804   G1ConcurrentMarkThread* _cm_thread;
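
Taken together, the declarations above cover the young-pause path: build a collection set, evacuate its initial and then optional parts, and finally free (or abandon) it. The sketch below shows one plausible ordering of those calls inside do_collection_pause_at_safepoint(); it is a heavily simplified approximation of the pre-change flow (note the plain expand_heap_after_young_collection() call), the nullptr placeholders only stand in for state the real code sets up, and none of it should be read as the actual implementation.

  #include "gc/g1/g1CollectedHeap.hpp"

  // Illustrative only: an approximate ordering of the calls declared above.
  // This is not the actual body of do_collection_pause_at_safepoint().
  bool G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
    verify_before_young_collection(young_collection_verify_type());

    G1EvacuationInfo evacuation_info;
    calculate_collection_set(evacuation_info, target_pause_time_ms);

    // The real code builds the per-thread scan states and the redirty-cards
    // queue set here; nullptr is a placeholder for this sketch only.
    G1ParScanThreadStateSet* per_thread_states = nullptr;
    G1RedirtyCardsQueueSet* rdcqs = nullptr;

    pre_evacuate_collection_set(evacuation_info, per_thread_states);
    evacuate_initial_collection_set(per_thread_states);
    evacuate_optional_collection_set(per_thread_states);  // batches via evacuate_next_optional_regions()
    post_evacuate_collection_set(evacuation_info, rdcqs, per_thread_states);

    // Surviving-young-words statistics come from the scan states in the real code.
    free_collection_set(collection_set(), evacuation_info, nullptr);

    expand_heap_after_young_collection();
    verify_after_young_collection(young_collection_verify_type());
    return true;
  }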


1061   uint num_free_or_available_regions() const { return num_free_regions() + _hrm->available(); }
1062 
1063   MemoryUsage get_auxiliary_data_memory_usage() const {
1064     return _hrm->get_auxiliary_data_memory_usage();
1065   }
1066 
1067   // The number of regions that are not completely free.
1068   uint num_used_regions() const { return num_regions() - num_free_regions(); }
1069 
1070 #ifdef ASSERT
1071   bool is_on_master_free_list(HeapRegion* hr) {
1072     return _hrm->is_free(hr);
1073   }
1074 #endif // ASSERT
1075 
1076   inline void old_set_add(HeapRegion* hr);
1077   inline void old_set_remove(HeapRegion* hr);
1078 
1079   inline void archive_set_add(HeapRegion* hr);
1080 
1081   size_t non_young_capacity_bytes() {
1082     return (old_regions_count() + _archive_set.length() + humongous_regions_count()) * HeapRegion::GrainBytes;
1083   }
1084 
1085   // Determine whether the given region is one that we are using as an
1086   // old GC alloc region.
1087   bool is_old_gc_alloc_region(HeapRegion* hr);
1088 
1089   // Perform a collection of the heap; intended for use in implementing
1090   // "System.gc".  This probably implies as full a collection as the
1091   // "CollectedHeap" supports.
1092   virtual void collect(GCCause::Cause cause);
1093 
1094   // Perform a collection of the heap with the given cause.
1095   // Returns whether this collection actually executed.
1096   bool try_collect(GCCause::Cause cause);
1097 
1098   // True iff an evacuation has failed in the most-recent collection.
1099   bool evacuation_failed() { return _evacuation_failed; }
1100 
1101   void remove_from_old_sets(const uint old_regions_removed, const uint humongous_regions_removed);
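
The accounting helpers above are plain arithmetic over the region sets: num_used_regions() is committed minus free regions, and non_young_capacity_bytes() multiplies the old, archive and humongous region counts by the region size. A standalone sketch of that arithmetic, using made-up counts and an assumed 4 MB region size in place of HeapRegion::GrainBytes:

  #include <cstdint>
  #include <cstdio>

  int main() {
    // Hypothetical numbers, purely for illustration.
    const uint32_t num_regions       = 512;  // committed regions
    const uint32_t num_free_regions  = 200;
    const uint32_t old_regions       = 180;
    const uint32_t archive_regions   = 2;
    const uint32_t humongous_regions = 10;
    const size_t   grain_bytes       = 4u * 1024 * 1024;  // assumed region size

    // num_used_regions(): regions that are not completely free.
    const uint32_t used_regions = num_regions - num_free_regions;

    // non_young_capacity_bytes(): capacity held by old, archive and humongous regions.
    const size_t non_young_capacity =
        (size_t)(old_regions + archive_regions + humongous_regions) * grain_bytes;

    printf("used regions:            %u\n", used_regions);
    printf("non-young capacity (MB): %zu\n", non_young_capacity / (1024 * 1024));
    return 0;
  }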


1262     // ensure that we don't try to allocate a TLAB as
1263     // humongous and that we don't allocate a humongous
1264     // object in a TLAB.
1265     return word_size > _humongous_object_threshold_in_words;
1266   }
1267 
1268   // Returns the humongous threshold for a specific region size
1269   static size_t humongous_threshold_for(size_t region_size) {
1270     return (region_size / 2);
1271   }
1272 
1273   // Returns the number of regions the humongous object of the given word size
1274   // requires.
1275   static size_t humongous_obj_size_in_regions(size_t word_size);
1276 
 1277   // Return the maximum heap capacity.
1278   virtual size_t max_capacity() const;
1279 
1280   // Return the size of reserved memory. Returns different value than max_capacity() when AllocateOldGenAt is used.
1281   virtual size_t max_reserved_capacity() const;
1282 
1283   virtual jlong millis_since_last_gc();
1284 
1285 
1286   // Convenience function to be used in situations where the heap type can be
1287   // asserted to be this type.
1288   static G1CollectedHeap* heap();
1289 
1290   void set_region_short_lived_locked(HeapRegion* hr);
1291   // add appropriate methods for any other surv rate groups
1292 
1293   const G1SurvivorRegions* survivor() const { return &_survivor; }
1294 
1295   uint eden_regions_count() const { return _eden.length(); }
1296   uint eden_regions_count(uint node_index) const { return _eden.regions_on_node(node_index); }
1297   uint survivor_regions_count() const { return _survivor.length(); }
1298   uint survivor_regions_count(uint node_index) const { return _survivor.regions_on_node(node_index); }
1299   size_t eden_regions_used_bytes() const { return _eden.used_bytes(); }
1300   size_t survivor_regions_used_bytes() const { return _survivor.used_bytes(); }
1301   uint young_regions_count() const { return _eden.length() + _survivor.length(); }
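
The humongous-object helpers reduce to simple arithmetic: an allocation is humongous once it exceeds half a region (humongous_threshold_for()), and the number of regions it needs is its size rounded up to whole regions. A standalone sketch of that arithmetic, with an assumed 8-byte heap word and 4 MB region; the rounding in humongous_obj_size_in_regions() is one plausible reading of the declaration, not the actual implementation:

  #include <cstddef>
  #include <cstdio>

  // Mirrors humongous_threshold_for(): half the region size.
  static size_t humongous_threshold_for(size_t region_size_words) {
    return region_size_words / 2;
  }

  // Assumed behaviour of humongous_obj_size_in_regions(): round the object
  // size up to a whole number of regions.
  static size_t humongous_obj_size_in_regions(size_t word_size, size_t region_size_words) {
    return (word_size + region_size_words - 1) / region_size_words;
  }

  int main() {
    const size_t heap_word_size    = 8;                                // assumed 64-bit heap words
    const size_t region_size_words = 4 * 1024 * 1024 / heap_word_size; // assumed 4 MB region
    const size_t threshold         = humongous_threshold_for(region_size_words);

    const size_t obj_words = 3 * 1024 * 1024 / heap_word_size;         // a 3 MB object
    const bool   humongous = obj_words > threshold;                    // the is_humongous() test

    printf("threshold (words): %zu\n", threshold);
    printf("3 MB object: humongous=%s, regions=%zu\n",
           humongous ? "yes" : "no",
           humongous_obj_size_in_regions(obj_words, region_size_words));
    return 0;
  }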

The same parts of the declaration after the change, with the SoftMaxHeapSize-related additions:

 764 
 765   G1HeapVerifier::G1VerifyType young_collection_verify_type() const;
 766   void verify_before_young_collection(G1HeapVerifier::G1VerifyType type);
 767   void verify_after_young_collection(G1HeapVerifier::G1VerifyType type);
 768 
 769   void calculate_collection_set(G1EvacuationInfo& evacuation_info, double target_pause_time_ms);
 770 
 771   // Actually do the work of evacuating the parts of the collection set.
 772   void evacuate_initial_collection_set(G1ParScanThreadStateSet* per_thread_states);
 773   void evacuate_optional_collection_set(G1ParScanThreadStateSet* per_thread_states);
 774 private:
 775   // Evacuate the next set of optional regions.
 776   void evacuate_next_optional_regions(G1ParScanThreadStateSet* per_thread_states);
 777 
 778 public:
 779   void pre_evacuate_collection_set(G1EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* pss);
 780   void post_evacuate_collection_set(G1EvacuationInfo& evacuation_info,
 781                                     G1RedirtyCardsQueueSet* rdcqs,
 782                                     G1ParScanThreadStateSet* pss);
 783 
 784   void resize_heap_after_young_collection();
 785   bool expand_heap_after_young_collection();
 786   void shrink_heap_after_young_collection();
 787   void expand_heap_after_concurrent_mark();
 788 
 789   // Update object copying statistics.
 790   void record_obj_copy_mem_stats();
 791 
 792   // The hot card cache for remembered set insertion optimization.
 793   G1HotCardCache* _hot_card_cache;
 794 
 795   // The g1 remembered set of the heap.
 796   G1RemSet* _rem_set;
 797 
 798   // After a collection pause, convert the regions in the collection set into free
 799   // regions.
 800   void free_collection_set(G1CollectionSet* collection_set, G1EvacuationInfo& evacuation_info, const size_t* surviving_young_words);
 801 
 802   // Abandon the current collection set without recording policy
 803   // statistics or updating free lists.
 804   void abandon_collection_set(G1CollectionSet* collection_set);
 805 
 806   // The concurrent marker (and the thread it runs in.)
 807   G1ConcurrentMark* _cm;
 808   G1ConcurrentMarkThread* _cm_thread;
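
Compared with the old version, the single expand_heap_after_young_collection() call is replaced by a resize step that can grow or shrink the heap, plus a separate expansion hook after concurrent marking. A minimal sketch of how the resize step could be wired together, assuming it simply tries to expand first and otherwise considers shrinking; the actual heuristics in the patch, which also take the soft maximum heap size into account, are more involved:

  #include "gc/g1/g1CollectedHeap.hpp"

  // Illustrative only: one way the new hooks could fit together after a young
  // pause. The real implementation applies its own policy, e.g. comparing heap
  // usage against soft_max_capacity() before deciding which way to resize.
  void G1CollectedHeap::resize_heap_after_young_collection() {
    if (!expand_heap_after_young_collection()) {
      // No expansion happened, so consider giving uncommitted memory back.
      shrink_heap_after_young_collection();
    }
  }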


1065   uint num_free_or_available_regions() const { return num_free_regions() + _hrm->available(); }
1066 
1067   MemoryUsage get_auxiliary_data_memory_usage() const {
1068     return _hrm->get_auxiliary_data_memory_usage();
1069   }
1070 
1071   // The number of regions that are not completely free.
1072   uint num_used_regions() const { return num_regions() - num_free_regions(); }
1073 
1074 #ifdef ASSERT
1075   bool is_on_master_free_list(HeapRegion* hr) {
1076     return _hrm->is_free(hr);
1077   }
1078 #endif // ASSERT
1079 
1080   inline void old_set_add(HeapRegion* hr);
1081   inline void old_set_remove(HeapRegion* hr);
1082 
1083   inline void archive_set_add(HeapRegion* hr);
1084 
1085   size_t non_young_capacity_bytes() const {
1086     return (old_regions_count() + _archive_set.length() + humongous_regions_count()) * HeapRegion::GrainBytes;
1087   }
1088 
1089   // Determine whether the given region is one that we are using as an
1090   // old GC alloc region.
1091   bool is_old_gc_alloc_region(HeapRegion* hr);
1092 
1093   // Perform a collection of the heap; intended for use in implementing
1094   // "System.gc".  This probably implies as full a collection as the
1095   // "CollectedHeap" supports.
1096   virtual void collect(GCCause::Cause cause);
1097 
1098   // Perform a collection of the heap with the given cause.
1099   // Returns whether this collection actually executed.
1100   bool try_collect(GCCause::Cause cause);
1101 
1102   // True iff an evacuation has failed in the most-recent collection.
1103   bool evacuation_failed() { return _evacuation_failed; }
1104 
1105   void remove_from_old_sets(const uint old_regions_removed, const uint humongous_regions_removed);
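
collect() and try_collect() differ only in whether the caller learns if the requested collection actually ran. A minimal sketch, assuming collect() simply delegates and discards the result; shown for illustration rather than as the actual definition:

  #include "gc/g1/g1CollectedHeap.hpp"

  // Illustrative only: collect() as the fire-and-forget form of try_collect().
  void G1CollectedHeap::collect(GCCause::Cause cause) {
    // Callers that need to know whether the collection executed use
    // try_collect() directly and check its return value.
    (void) try_collect(cause);
  }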


1266     // ensure that we don't try to allocate a TLAB as
1267     // humongous and that we don't allocate a humongous
1268     // object in a TLAB.
1269     return word_size > _humongous_object_threshold_in_words;
1270   }
1271 
1272   // Returns the humongous threshold for a specific region size
1273   static size_t humongous_threshold_for(size_t region_size) {
1274     return (region_size / 2);
1275   }
1276 
1277   // Returns the number of regions the humongous object of the given word size
1278   // requires.
1279   static size_t humongous_obj_size_in_regions(size_t word_size);
1280 
 1281   // Return the maximum heap capacity.
1282   virtual size_t max_capacity() const;
1283 
1284   // Return the size of reserved memory. Returns different value than max_capacity() when AllocateOldGenAt is used.
1285   virtual size_t max_reserved_capacity() const;
1286 
 1287   // Return the soft maximum heap capacity.
1288   size_t soft_max_capacity() const;
1289 
1290   virtual jlong millis_since_last_gc();
1291 
1292 
1293   // Convenience function to be used in situations where the heap type can be
1294   // asserted to be this type.
1295   static G1CollectedHeap* heap();
1296 
1297   void set_region_short_lived_locked(HeapRegion* hr);
1298   // add appropriate methods for any other surv rate groups
1299 
1300   const G1SurvivorRegions* survivor() const { return &_survivor; }
1301 
1302   uint eden_regions_count() const { return _eden.length(); }
1303   uint eden_regions_count(uint node_index) const { return _eden.regions_on_node(node_index); }
1304   uint survivor_regions_count() const { return _survivor.length(); }
1305   uint survivor_regions_count(uint node_index) const { return _survivor.regions_on_node(node_index); }
1306   size_t eden_regions_used_bytes() const { return _eden.used_bytes(); }
1307   size_t survivor_regions_used_bytes() const { return _survivor.used_bytes(); }
1308   uint young_regions_count() const { return _eden.length() + _survivor.length(); }
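
soft_max_capacity() is the new hook through which the manageable SoftMaxHeapSize flag (changeable at runtime) reaches the G1 sizing heuristics. A minimal sketch of one plausible implementation, assuming the value is only clamped between MinHeapSize and max_capacity() and that a value of 0 means the flag is unset; the actual patch may also align the result to the region size or apply different bounds:

  #include "gc/g1/g1CollectedHeap.hpp"
  #include "runtime/globals.hpp"
  #include "utilities/globalDefinitions.hpp"

  // Illustrative only: clamp the runtime-adjustable SoftMaxHeapSize flag into
  // the range the heap can actually honor.
  size_t G1CollectedHeap::soft_max_capacity() const {
    if (SoftMaxHeapSize == 0) {
      // Assumption for this sketch: 0 means "not set", fall back to the hard maximum.
      return max_capacity();
    }
    return MAX2(MinHeapSize, MIN2(SoftMaxHeapSize, max_capacity()));
  }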